From 8081159a417842da85d836984fe802e1334e4d75 Mon Sep 17 00:00:00 2001
From: Simon Pichugin
Date: Mar 04 2016 17:23:06 +0000
Subject: Ticket 48368 - Resolve the py.test conflicts with the create_test.py issue

Bug description: When we run:
py.test -m "some marks" dirsrvtests/
py.test picks up dirsrvtests/create_test.py and fails with
"Missing required ticket number/suite name".

Fix description: Move the test content (tickets, suites, data, etc.) into
dirsrvtests/tests/ and leave create_test.py in dirsrvtests/.
All tests pass with the same results as before the restructuring.

https://fedorahosted.org/389/ticket/48368

Reviewed by: mreynolds (Thanks!)

---

diff --git a/dirsrvtests/data/README b/dirsrvtests/data/README
deleted file mode 100644
index 4261f92..0000000
--- a/dirsrvtests/data/README
+++ /dev/null
@@ -1,11 +0,0 @@
-DATA DIRECTORY README
-
-This directory is used for storing LDIF files used by the dirsrvtests scripts.
-This directory can be retrieved via getDir() from the DirSrv class.
-
-Example:
-
-    data_dir_path = topology.standalone.getDir(__file__, DATA_DIR)
-
-    ldif_file = data_dir_path + "ticket44444/1000entries.ldif"
-
diff --git a/dirsrvtests/data/basic/dse.ldif.broken b/dirsrvtests/data/basic/dse.ldif.broken
deleted file mode 100644
index 489b443..0000000
--- a/dirsrvtests/data/basic/dse.ldif.broken
+++ /dev/null
@@ -1,95 +0,0 @@
-dn:
-objectClass: top
-aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow(
- read,search,compare) userdn="ldap:///anyone";)
-creatorsName: cn=server,cn=plugins,cn=config
-modifiersName: cn=server,cn=plugins,cn=config
-createTimestamp: 20150204165610Z
-modifyTimestamp: 20150204165610Z
-
-dn: cn=config
-cn: config
-objectClass: top
-objectClass: extensibleObject
-objectClass: nsslapdConfig
-nsslapd-schemadir: /etc/dirsrv/slapd-localhost/schema
-nsslapd-lockdir: /var/lock/dirsrv/slapd-localhost
-nsslapd-tmpdir: /tmp
-nsslapd-certdir: /etc/dirsrv/slapd-localhost
-nsslapd-ldifdir: /var/lib/dirsrv/slapd-localhost/ldif
-nsslapd-bakdir: /var/lib/dirsrv/slapd-localhost/bak
-nsslapd-rundir: /var/run/dirsrv
-nsslapd-instancedir: /usr/lib64/dirsrv/slapd-localhost
-nsslapd-accesslog-logging-enabled: on
-nsslapd-accesslog-maxlogsperdir: 10
-nsslapd-accesslog-mode: 600
-nsslapd-accesslog-maxlogsize: 100
-nsslapd-accesslog-logrotationtime: 1
-nsslapd-accesslog-logrotationtimeunit: day
-nsslapd-accesslog-logrotationsync-enabled: off
-nsslapd-accesslog-logrotationsynchour: 0
-nsslapd-accesslog-logrotationsyncmin: 0
-nsslapd-accesslog: /var/log/dirsrv/slapd-localhost/access
-nsslapd-enquote-sup-oc: off
-nsslapd-localhost: localhost.localdomain
-nsslapd-schemacheck: on
-nsslapd-syntaxcheck: on
-nsslapd-dn-validate-strict: off
-nsslapd-rewrite-rfc1274: off
-nsslapd-return-exact-case: on
-nsslapd-ssl-check-hostname: on
-nsslapd-validate-cert: warn
-nsslapd-allow-unauthenticated-binds: off
-nsslapd-require-secure-binds: off
-nsslapd-allow-anonymous####-access: on
-nsslapd-localssf: 71
-nsslapd-minssf: 0
-nsslapd-port: 389
-nsslapd-localuser: nobody
-nsslapd-errorlog-logging-enabled: on
-nsslapd-errorlog-mode: 600
-nsslapd-errorlog-maxlogsperdir: 2
-nsslapd-errorlog-maxlogsize: 100
-nsslapd-errorlog-logrotationtime: 1
-nsslapd-errorlog-logrotationtimeunit: week
-nsslapd-errorlog-logrotationsync-enabled: off
-nsslapd-errorlog-logrotationsynchour: 0
-nsslapd-errorlog-logrotationsyncmin: 0
-nsslapd-errorlog: /var/log/dirsrv/slapd-localhost/errors
-nsslapd-auditlog: /var/log/dirsrv/slapd-localhost/audit
-nsslapd-auditlog-mode: 600
-nsslapd-auditlog-maxlogsize: 100
-nsslapd-auditlog-logrotationtime: 1
-nsslapd-auditlog-logrotationtimeunit: day
-nsslapd-rootdn: cn=dm
-nsslapd-maxdescriptors: 1024
-nsslapd-max-filter-nest-level: 40
-nsslapd-ndn-cache-enabled: on
-nsslapd-sasl-mapping-fallback: off
-nsslapd-dynamic-plugins: off
-nsslapd-allow-hashed-passwords: off
-nsslapd-ldapifilepath: /var/run/slapd-localhost.socket
-nsslapd-ldapilisten: off
-nsslapd-ldapiautobind: off
-nsslapd-ldapimaprootdn: cn=dm
-nsslapd-ldapimaptoentries: off
-nsslapd-ldapiuidnumbertype: uidNumber
-nsslapd-ldapigidnumbertype: gidNumber
-nsslapd-ldapientrysearchbase: dc=example,dc=com
-nsslapd-defaultnamingcontext: dc=example,dc=com
-aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a
- llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo
- logyManagement,o=NetscapeRoot";)
-aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a
- ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc
- apeRoot";)
-aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l
- dap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Group,cn=localhos
- t.localdomain,ou=example.com,o=NetscapeRoot";)
-modifiersName: cn=dm
-modifyTimestamp: 20150205195242Z
-nsslapd-auditlog-logging-enabled: on
-nsslapd-auditlog-logging-hide-unhashed-pw: off
-nsslapd-rootpw: {SSHA}AQH9bTYZW4kfkfyHg1k+lG88H2dFOuwakzFEpw==
-numSubordinates: 10
-
diff --git a/dirsrvtests/data/ticket47953/ticket47953.ldif b/dirsrvtests/data/ticket47953/ticket47953.ldif
deleted file mode 100644
index e59977e..0000000
--- a/dirsrvtests/data/ticket47953/ticket47953.ldif
+++ /dev/null
@@ -1,27 +0,0 @@
-dn: dc=example,dc=com
-objectClass: top
-objectClass: domain
-dc: example
-aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access";
- allow (read, search, compare) userdn="ldap:///anyone";)
-aci: (targetattr="carLicense || description || displayName || facsimileTelepho
- neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele
- dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress ||
- postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr
- ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber
- || telexNumber || title || userCertificate || userPassword || userSMIMECertif
- icate || x500UniqueIdentifier")(version 3.0; acl "Enable self write for commo
- n attributes"; allow (write) userdn="ldap:///self";)
-aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al
- low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com"
- );)
-aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a
- llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo
- logyManagement,o=NetscapeRoot";)
-aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a
- ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc
- apeRoot";)
-aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ
- e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr
- oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";)
-
diff --git a/dirsrvtests/data/ticket47988/schema_ipa3.3.tar.gz b/dirsrvtests/data/ticket47988/schema_ipa3.3.tar.gz
deleted file mode 100644
index 2b309a0..0000000
Binary files a/dirsrvtests/data/ticket47988/schema_ipa3.3.tar.gz and /dev/null differ
diff --git
a/dirsrvtests/data/ticket47988/schema_ipa4.1.tar.gz b/dirsrvtests/data/ticket47988/schema_ipa4.1.tar.gz deleted file mode 100644 index 84de0e9..0000000 Binary files a/dirsrvtests/data/ticket47988/schema_ipa4.1.tar.gz and /dev/null differ diff --git a/dirsrvtests/data/ticket48212/example1k_posix.ldif b/dirsrvtests/data/ticket48212/example1k_posix.ldif deleted file mode 100644 index 50000f2..0000000 --- a/dirsrvtests/data/ticket48212/example1k_posix.ldif +++ /dev/null @@ -1,17017 +0,0 @@ -dn: dc=example,dc=com -objectClass: top -objectClass: domain -dc: example -aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) -aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl2"; allow(read, search, compare) userdn = "ldap:///anyone";) - -dn: ou=People,dc=example,dc=com -objectClass: top -objectClass: organizationalunit -ou: People - -dn: ou=Groups,dc=example,dc=com -objectClass: top -objectClass: organizationalunit -ou: Groups - -dn: cn=user0,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user0 -sn: user0 -uid: uid0 -givenname: givenname0 -description: description0 -userPassword: password0 -mail: uid0 -uidnumber: 0 -gidnumber: 0 -homeDirectory: /home/uid0 - -dn: cn=user1,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user1 -sn: user1 -uid: uid1 -givenname: givenname1 -description: description1 -userPassword: password1 -mail: uid1 -uidnumber: 1 -gidnumber: 1 -homeDirectory: /home/uid1 - -dn: cn=user2,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user2 -sn: user2 -uid: uid2 -givenname: givenname2 -description: description2 -userPassword: password2 -mail: uid2 -uidnumber: 2 -gidnumber: 2 -homeDirectory: /home/uid2 - -dn: cn=user3,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user3 -sn: user3 -uid: uid3 -givenname: givenname3 -description: description3 -userPassword: password3 -mail: uid3 -uidnumber: 3 -gidnumber: 3 -homeDirectory: /home/uid3 - -dn: cn=user4,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user4 -sn: user4 -uid: uid4 -givenname: givenname4 -description: description4 -userPassword: password4 -mail: uid4 -uidnumber: 4 -gidnumber: 4 -homeDirectory: /home/uid4 - -dn: cn=user5,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user5 -sn: user5 -uid: uid5 -givenname: givenname5 -description: description5 -userPassword: password5 -mail: uid5 -uidnumber: 5 -gidnumber: 5 -homeDirectory: /home/uid5 - -dn: cn=user6,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user6 -sn: user6 -uid: uid6 -givenname: givenname6 -description: description6 -userPassword: password6 -mail: uid6 -uidnumber: 6 -gidnumber: 6 -homeDirectory: /home/uid6 - -dn: cn=user7,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: 
organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user7 -sn: user7 -uid: uid7 -givenname: givenname7 -description: description7 -userPassword: password7 -mail: uid7 -uidnumber: 7 -gidnumber: 7 -homeDirectory: /home/uid7 - -dn: cn=user8,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user8 -sn: user8 -uid: uid8 -givenname: givenname8 -description: description8 -userPassword: password8 -mail: uid8 -uidnumber: 8 -gidnumber: 8 -homeDirectory: /home/uid8 - -dn: cn=user9,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user9 -sn: user9 -uid: uid9 -givenname: givenname9 -description: description9 -userPassword: password9 -mail: uid9 -uidnumber: 9 -gidnumber: 9 -homeDirectory: /home/uid9 - -dn: cn=user10,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user10 -sn: user10 -uid: uid10 -givenname: givenname10 -description: description10 -userPassword: password10 -mail: uid10 -uidnumber: 10 -gidnumber: 10 -homeDirectory: /home/uid10 - -dn: cn=user11,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user11 -sn: user11 -uid: uid11 -givenname: givenname11 -description: description11 -userPassword: password11 -mail: uid11 -uidnumber: 11 -gidnumber: 11 -homeDirectory: /home/uid11 - -dn: cn=user12,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user12 -sn: user12 -uid: uid12 -givenname: givenname12 -description: description12 -userPassword: password12 -mail: uid12 -uidnumber: 12 -gidnumber: 12 -homeDirectory: /home/uid12 - -dn: cn=user13,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user13 -sn: user13 -uid: uid13 -givenname: givenname13 -description: description13 -userPassword: password13 -mail: uid13 -uidnumber: 13 -gidnumber: 13 -homeDirectory: /home/uid13 - -dn: cn=user14,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user14 -sn: user14 -uid: uid14 -givenname: givenname14 -description: description14 -userPassword: password14 -mail: uid14 -uidnumber: 14 -gidnumber: 14 -homeDirectory: /home/uid14 - -dn: cn=user15,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user15 -sn: user15 -uid: uid15 -givenname: givenname15 -description: description15 -userPassword: password15 -mail: uid15 -uidnumber: 15 -gidnumber: 15 -homeDirectory: /home/uid15 - -dn: cn=user16,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user16 -sn: user16 -uid: uid16 -givenname: givenname16 -description: description16 -userPassword: password16 -mail: uid16 -uidnumber: 16 -gidnumber: 16 -homeDirectory: /home/uid16 - -dn: cn=user17,ou=People,dc=example,dc=com -objectClass: top -objectClass: person 
-objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user17 -sn: user17 -uid: uid17 -givenname: givenname17 -description: description17 -userPassword: password17 -mail: uid17 -uidnumber: 17 -gidnumber: 17 -homeDirectory: /home/uid17 - -dn: cn=user18,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user18 -sn: user18 -uid: uid18 -givenname: givenname18 -description: description18 -userPassword: password18 -mail: uid18 -uidnumber: 18 -gidnumber: 18 -homeDirectory: /home/uid18 - -dn: cn=user19,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user19 -sn: user19 -uid: uid19 -givenname: givenname19 -description: description19 -userPassword: password19 -mail: uid19 -uidnumber: 19 -gidnumber: 19 -homeDirectory: /home/uid19 - -dn: cn=user20,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user20 -sn: user20 -uid: uid20 -givenname: givenname20 -description: description20 -userPassword: password20 -mail: uid20 -uidnumber: 20 -gidnumber: 20 -homeDirectory: /home/uid20 - -dn: cn=user21,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user21 -sn: user21 -uid: uid21 -givenname: givenname21 -description: description21 -userPassword: password21 -mail: uid21 -uidnumber: 21 -gidnumber: 21 -homeDirectory: /home/uid21 - -dn: cn=user22,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user22 -sn: user22 -uid: uid22 -givenname: givenname22 -description: description22 -userPassword: password22 -mail: uid22 -uidnumber: 22 -gidnumber: 22 -homeDirectory: /home/uid22 - -dn: cn=user23,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user23 -sn: user23 -uid: uid23 -givenname: givenname23 -description: description23 -userPassword: password23 -mail: uid23 -uidnumber: 23 -gidnumber: 23 -homeDirectory: /home/uid23 - -dn: cn=user24,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user24 -sn: user24 -uid: uid24 -givenname: givenname24 -description: description24 -userPassword: password24 -mail: uid24 -uidnumber: 24 -gidnumber: 24 -homeDirectory: /home/uid24 - -dn: cn=user25,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user25 -sn: user25 -uid: uid25 -givenname: givenname25 -description: description25 -userPassword: password25 -mail: uid25 -uidnumber: 25 -gidnumber: 25 -homeDirectory: /home/uid25 - -dn: cn=user26,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user26 -sn: user26 -uid: uid26 -givenname: givenname26 -description: description26 -userPassword: password26 -mail: uid26 -uidnumber: 26 -gidnumber: 26 -homeDirectory: /home/uid26 - -dn: cn=user27,ou=People,dc=example,dc=com 
-objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user27 -sn: user27 -uid: uid27 -givenname: givenname27 -description: description27 -userPassword: password27 -mail: uid27 -uidnumber: 27 -gidnumber: 27 -homeDirectory: /home/uid27 - -dn: cn=user28,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user28 -sn: user28 -uid: uid28 -givenname: givenname28 -description: description28 -userPassword: password28 -mail: uid28 -uidnumber: 28 -gidnumber: 28 -homeDirectory: /home/uid28 - -dn: cn=user29,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user29 -sn: user29 -uid: uid29 -givenname: givenname29 -description: description29 -userPassword: password29 -mail: uid29 -uidnumber: 29 -gidnumber: 29 -homeDirectory: /home/uid29 - -dn: cn=user30,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user30 -sn: user30 -uid: uid30 -givenname: givenname30 -description: description30 -userPassword: password30 -mail: uid30 -uidnumber: 30 -gidnumber: 30 -homeDirectory: /home/uid30 - -dn: cn=user31,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user31 -sn: user31 -uid: uid31 -givenname: givenname31 -description: description31 -userPassword: password31 -mail: uid31 -uidnumber: 31 -gidnumber: 31 -homeDirectory: /home/uid31 - -dn: cn=user32,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user32 -sn: user32 -uid: uid32 -givenname: givenname32 -description: description32 -userPassword: password32 -mail: uid32 -uidnumber: 32 -gidnumber: 32 -homeDirectory: /home/uid32 - -dn: cn=user33,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user33 -sn: user33 -uid: uid33 -givenname: givenname33 -description: description33 -userPassword: password33 -mail: uid33 -uidnumber: 33 -gidnumber: 33 -homeDirectory: /home/uid33 - -dn: cn=user34,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user34 -sn: user34 -uid: uid34 -givenname: givenname34 -description: description34 -userPassword: password34 -mail: uid34 -uidnumber: 34 -gidnumber: 34 -homeDirectory: /home/uid34 - -dn: cn=user35,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user35 -sn: user35 -uid: uid35 -givenname: givenname35 -description: description35 -userPassword: password35 -mail: uid35 -uidnumber: 35 -gidnumber: 35 -homeDirectory: /home/uid35 - -dn: cn=user36,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user36 -sn: user36 -uid: uid36 -givenname: givenname36 -description: description36 -userPassword: password36 -mail: uid36 -uidnumber: 36 -gidnumber: 36 -homeDirectory: /home/uid36 - -dn: 
cn=user37,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user37 -sn: user37 -uid: uid37 -givenname: givenname37 -description: description37 -userPassword: password37 -mail: uid37 -uidnumber: 37 -gidnumber: 37 -homeDirectory: /home/uid37 - -dn: cn=user38,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user38 -sn: user38 -uid: uid38 -givenname: givenname38 -description: description38 -userPassword: password38 -mail: uid38 -uidnumber: 38 -gidnumber: 38 -homeDirectory: /home/uid38 - -dn: cn=user39,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user39 -sn: user39 -uid: uid39 -givenname: givenname39 -description: description39 -userPassword: password39 -mail: uid39 -uidnumber: 39 -gidnumber: 39 -homeDirectory: /home/uid39 - -dn: cn=user40,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user40 -sn: user40 -uid: uid40 -givenname: givenname40 -description: description40 -userPassword: password40 -mail: uid40 -uidnumber: 40 -gidnumber: 40 -homeDirectory: /home/uid40 - -dn: cn=user41,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user41 -sn: user41 -uid: uid41 -givenname: givenname41 -description: description41 -userPassword: password41 -mail: uid41 -uidnumber: 41 -gidnumber: 41 -homeDirectory: /home/uid41 - -dn: cn=user42,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user42 -sn: user42 -uid: uid42 -givenname: givenname42 -description: description42 -userPassword: password42 -mail: uid42 -uidnumber: 42 -gidnumber: 42 -homeDirectory: /home/uid42 - -dn: cn=user43,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user43 -sn: user43 -uid: uid43 -givenname: givenname43 -description: description43 -userPassword: password43 -mail: uid43 -uidnumber: 43 -gidnumber: 43 -homeDirectory: /home/uid43 - -dn: cn=user44,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user44 -sn: user44 -uid: uid44 -givenname: givenname44 -description: description44 -userPassword: password44 -mail: uid44 -uidnumber: 44 -gidnumber: 44 -homeDirectory: /home/uid44 - -dn: cn=user45,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user45 -sn: user45 -uid: uid45 -givenname: givenname45 -description: description45 -userPassword: password45 -mail: uid45 -uidnumber: 45 -gidnumber: 45 -homeDirectory: /home/uid45 - -dn: cn=user46,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user46 -sn: user46 -uid: uid46 -givenname: givenname46 -description: description46 -userPassword: password46 -mail: uid46 -uidnumber: 46 -gidnumber: 46 
-homeDirectory: /home/uid46 - -dn: cn=user47,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user47 -sn: user47 -uid: uid47 -givenname: givenname47 -description: description47 -userPassword: password47 -mail: uid47 -uidnumber: 47 -gidnumber: 47 -homeDirectory: /home/uid47 - -dn: cn=user48,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user48 -sn: user48 -uid: uid48 -givenname: givenname48 -description: description48 -userPassword: password48 -mail: uid48 -uidnumber: 48 -gidnumber: 48 -homeDirectory: /home/uid48 - -dn: cn=user49,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user49 -sn: user49 -uid: uid49 -givenname: givenname49 -description: description49 -userPassword: password49 -mail: uid49 -uidnumber: 49 -gidnumber: 49 -homeDirectory: /home/uid49 - -dn: cn=user50,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user50 -sn: user50 -uid: uid50 -givenname: givenname50 -description: description50 -userPassword: password50 -mail: uid50 -uidnumber: 50 -gidnumber: 50 -homeDirectory: /home/uid50 - -dn: cn=user51,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user51 -sn: user51 -uid: uid51 -givenname: givenname51 -description: description51 -userPassword: password51 -mail: uid51 -uidnumber: 51 -gidnumber: 51 -homeDirectory: /home/uid51 - -dn: cn=user52,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user52 -sn: user52 -uid: uid52 -givenname: givenname52 -description: description52 -userPassword: password52 -mail: uid52 -uidnumber: 52 -gidnumber: 52 -homeDirectory: /home/uid52 - -dn: cn=user53,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user53 -sn: user53 -uid: uid53 -givenname: givenname53 -description: description53 -userPassword: password53 -mail: uid53 -uidnumber: 53 -gidnumber: 53 -homeDirectory: /home/uid53 - -dn: cn=user54,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user54 -sn: user54 -uid: uid54 -givenname: givenname54 -description: description54 -userPassword: password54 -mail: uid54 -uidnumber: 54 -gidnumber: 54 -homeDirectory: /home/uid54 - -dn: cn=user55,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user55 -sn: user55 -uid: uid55 -givenname: givenname55 -description: description55 -userPassword: password55 -mail: uid55 -uidnumber: 55 -gidnumber: 55 -homeDirectory: /home/uid55 - -dn: cn=user56,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user56 -sn: user56 -uid: uid56 -givenname: givenname56 -description: description56 -userPassword: password56 -mail: 
uid56 -uidnumber: 56 -gidnumber: 56 -homeDirectory: /home/uid56 - -dn: cn=user57,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user57 -sn: user57 -uid: uid57 -givenname: givenname57 -description: description57 -userPassword: password57 -mail: uid57 -uidnumber: 57 -gidnumber: 57 -homeDirectory: /home/uid57 - -dn: cn=user58,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user58 -sn: user58 -uid: uid58 -givenname: givenname58 -description: description58 -userPassword: password58 -mail: uid58 -uidnumber: 58 -gidnumber: 58 -homeDirectory: /home/uid58 - -dn: cn=user59,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user59 -sn: user59 -uid: uid59 -givenname: givenname59 -description: description59 -userPassword: password59 -mail: uid59 -uidnumber: 59 -gidnumber: 59 -homeDirectory: /home/uid59 - -dn: cn=user60,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user60 -sn: user60 -uid: uid60 -givenname: givenname60 -description: description60 -userPassword: password60 -mail: uid60 -uidnumber: 60 -gidnumber: 60 -homeDirectory: /home/uid60 - -dn: cn=user61,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user61 -sn: user61 -uid: uid61 -givenname: givenname61 -description: description61 -userPassword: password61 -mail: uid61 -uidnumber: 61 -gidnumber: 61 -homeDirectory: /home/uid61 - -dn: cn=user62,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user62 -sn: user62 -uid: uid62 -givenname: givenname62 -description: description62 -userPassword: password62 -mail: uid62 -uidnumber: 62 -gidnumber: 62 -homeDirectory: /home/uid62 - -dn: cn=user63,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user63 -sn: user63 -uid: uid63 -givenname: givenname63 -description: description63 -userPassword: password63 -mail: uid63 -uidnumber: 63 -gidnumber: 63 -homeDirectory: /home/uid63 - -dn: cn=user64,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user64 -sn: user64 -uid: uid64 -givenname: givenname64 -description: description64 -userPassword: password64 -mail: uid64 -uidnumber: 64 -gidnumber: 64 -homeDirectory: /home/uid64 - -dn: cn=user65,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user65 -sn: user65 -uid: uid65 -givenname: givenname65 -description: description65 -userPassword: password65 -mail: uid65 -uidnumber: 65 -gidnumber: 65 -homeDirectory: /home/uid65 - -dn: cn=user66,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user66 -sn: user66 -uid: uid66 -givenname: givenname66 -description: 
description66 -userPassword: password66 -mail: uid66 -uidnumber: 66 -gidnumber: 66 -homeDirectory: /home/uid66 - -dn: cn=user67,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user67 -sn: user67 -uid: uid67 -givenname: givenname67 -description: description67 -userPassword: password67 -mail: uid67 -uidnumber: 67 -gidnumber: 67 -homeDirectory: /home/uid67 - -dn: cn=user68,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user68 -sn: user68 -uid: uid68 -givenname: givenname68 -description: description68 -userPassword: password68 -mail: uid68 -uidnumber: 68 -gidnumber: 68 -homeDirectory: /home/uid68 - -dn: cn=user69,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user69 -sn: user69 -uid: uid69 -givenname: givenname69 -description: description69 -userPassword: password69 -mail: uid69 -uidnumber: 69 -gidnumber: 69 -homeDirectory: /home/uid69 - -dn: cn=user70,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user70 -sn: user70 -uid: uid70 -givenname: givenname70 -description: description70 -userPassword: password70 -mail: uid70 -uidnumber: 70 -gidnumber: 70 -homeDirectory: /home/uid70 - -dn: cn=user71,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user71 -sn: user71 -uid: uid71 -givenname: givenname71 -description: description71 -userPassword: password71 -mail: uid71 -uidnumber: 71 -gidnumber: 71 -homeDirectory: /home/uid71 - -dn: cn=user72,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user72 -sn: user72 -uid: uid72 -givenname: givenname72 -description: description72 -userPassword: password72 -mail: uid72 -uidnumber: 72 -gidnumber: 72 -homeDirectory: /home/uid72 - -dn: cn=user73,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user73 -sn: user73 -uid: uid73 -givenname: givenname73 -description: description73 -userPassword: password73 -mail: uid73 -uidnumber: 73 -gidnumber: 73 -homeDirectory: /home/uid73 - -dn: cn=user74,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user74 -sn: user74 -uid: uid74 -givenname: givenname74 -description: description74 -userPassword: password74 -mail: uid74 -uidnumber: 74 -gidnumber: 74 -homeDirectory: /home/uid74 - -dn: cn=user75,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user75 -sn: user75 -uid: uid75 -givenname: givenname75 -description: description75 -userPassword: password75 -mail: uid75 -uidnumber: 75 -gidnumber: 75 -homeDirectory: /home/uid75 - -dn: cn=user76,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user76 -sn: user76 -uid: uid76 
-givenname: givenname76 -description: description76 -userPassword: password76 -mail: uid76 -uidnumber: 76 -gidnumber: 76 -homeDirectory: /home/uid76 - -dn: cn=user77,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user77 -sn: user77 -uid: uid77 -givenname: givenname77 -description: description77 -userPassword: password77 -mail: uid77 -uidnumber: 77 -gidnumber: 77 -homeDirectory: /home/uid77 - -dn: cn=user78,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user78 -sn: user78 -uid: uid78 -givenname: givenname78 -description: description78 -userPassword: password78 -mail: uid78 -uidnumber: 78 -gidnumber: 78 -homeDirectory: /home/uid78 - -dn: cn=user79,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user79 -sn: user79 -uid: uid79 -givenname: givenname79 -description: description79 -userPassword: password79 -mail: uid79 -uidnumber: 79 -gidnumber: 79 -homeDirectory: /home/uid79 - -dn: cn=user80,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user80 -sn: user80 -uid: uid80 -givenname: givenname80 -description: description80 -userPassword: password80 -mail: uid80 -uidnumber: 80 -gidnumber: 80 -homeDirectory: /home/uid80 - -dn: cn=user81,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user81 -sn: user81 -uid: uid81 -givenname: givenname81 -description: description81 -userPassword: password81 -mail: uid81 -uidnumber: 81 -gidnumber: 81 -homeDirectory: /home/uid81 - -dn: cn=user82,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user82 -sn: user82 -uid: uid82 -givenname: givenname82 -description: description82 -userPassword: password82 -mail: uid82 -uidnumber: 82 -gidnumber: 82 -homeDirectory: /home/uid82 - -dn: cn=user83,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user83 -sn: user83 -uid: uid83 -givenname: givenname83 -description: description83 -userPassword: password83 -mail: uid83 -uidnumber: 83 -gidnumber: 83 -homeDirectory: /home/uid83 - -dn: cn=user84,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user84 -sn: user84 -uid: uid84 -givenname: givenname84 -description: description84 -userPassword: password84 -mail: uid84 -uidnumber: 84 -gidnumber: 84 -homeDirectory: /home/uid84 - -dn: cn=user85,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user85 -sn: user85 -uid: uid85 -givenname: givenname85 -description: description85 -userPassword: password85 -mail: uid85 -uidnumber: 85 -gidnumber: 85 -homeDirectory: /home/uid85 - -dn: cn=user86,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount 
-cn: user86 -sn: user86 -uid: uid86 -givenname: givenname86 -description: description86 -userPassword: password86 -mail: uid86 -uidnumber: 86 -gidnumber: 86 -homeDirectory: /home/uid86 - -dn: cn=user87,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user87 -sn: user87 -uid: uid87 -givenname: givenname87 -description: description87 -userPassword: password87 -mail: uid87 -uidnumber: 87 -gidnumber: 87 -homeDirectory: /home/uid87 - -dn: cn=user88,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user88 -sn: user88 -uid: uid88 -givenname: givenname88 -description: description88 -userPassword: password88 -mail: uid88 -uidnumber: 88 -gidnumber: 88 -homeDirectory: /home/uid88 - -dn: cn=user89,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user89 -sn: user89 -uid: uid89 -givenname: givenname89 -description: description89 -userPassword: password89 -mail: uid89 -uidnumber: 89 -gidnumber: 89 -homeDirectory: /home/uid89 - -dn: cn=user90,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user90 -sn: user90 -uid: uid90 -givenname: givenname90 -description: description90 -userPassword: password90 -mail: uid90 -uidnumber: 90 -gidnumber: 90 -homeDirectory: /home/uid90 - -dn: cn=user91,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user91 -sn: user91 -uid: uid91 -givenname: givenname91 -description: description91 -userPassword: password91 -mail: uid91 -uidnumber: 91 -gidnumber: 91 -homeDirectory: /home/uid91 - -dn: cn=user92,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user92 -sn: user92 -uid: uid92 -givenname: givenname92 -description: description92 -userPassword: password92 -mail: uid92 -uidnumber: 92 -gidnumber: 92 -homeDirectory: /home/uid92 - -dn: cn=user93,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user93 -sn: user93 -uid: uid93 -givenname: givenname93 -description: description93 -userPassword: password93 -mail: uid93 -uidnumber: 93 -gidnumber: 93 -homeDirectory: /home/uid93 - -dn: cn=user94,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user94 -sn: user94 -uid: uid94 -givenname: givenname94 -description: description94 -userPassword: password94 -mail: uid94 -uidnumber: 94 -gidnumber: 94 -homeDirectory: /home/uid94 - -dn: cn=user95,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user95 -sn: user95 -uid: uid95 -givenname: givenname95 -description: description95 -userPassword: password95 -mail: uid95 -uidnumber: 95 -gidnumber: 95 -homeDirectory: /home/uid95 - -dn: cn=user96,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: 
inetOrgPerson -objectClass: posixAccount -cn: user96 -sn: user96 -uid: uid96 -givenname: givenname96 -description: description96 -userPassword: password96 -mail: uid96 -uidnumber: 96 -gidnumber: 96 -homeDirectory: /home/uid96 - -dn: cn=user97,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user97 -sn: user97 -uid: uid97 -givenname: givenname97 -description: description97 -userPassword: password97 -mail: uid97 -uidnumber: 97 -gidnumber: 97 -homeDirectory: /home/uid97 - -dn: cn=user98,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user98 -sn: user98 -uid: uid98 -givenname: givenname98 -description: description98 -userPassword: password98 -mail: uid98 -uidnumber: 98 -gidnumber: 98 -homeDirectory: /home/uid98 - -dn: cn=user99,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user99 -sn: user99 -uid: uid99 -givenname: givenname99 -description: description99 -userPassword: password99 -mail: uid99 -uidnumber: 99 -gidnumber: 99 -homeDirectory: /home/uid99 - -dn: cn=user100,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user100 -sn: user100 -uid: uid100 -givenname: givenname100 -description: description100 -userPassword: password100 -mail: uid100 -uidnumber: 100 -gidnumber: 100 -homeDirectory: /home/uid100 - -dn: cn=user101,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user101 -sn: user101 -uid: uid101 -givenname: givenname101 -description: description101 -userPassword: password101 -mail: uid101 -uidnumber: 101 -gidnumber: 101 -homeDirectory: /home/uid101 - -dn: cn=user102,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user102 -sn: user102 -uid: uid102 -givenname: givenname102 -description: description102 -userPassword: password102 -mail: uid102 -uidnumber: 102 -gidnumber: 102 -homeDirectory: /home/uid102 - -dn: cn=user103,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user103 -sn: user103 -uid: uid103 -givenname: givenname103 -description: description103 -userPassword: password103 -mail: uid103 -uidnumber: 103 -gidnumber: 103 -homeDirectory: /home/uid103 - -dn: cn=user104,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user104 -sn: user104 -uid: uid104 -givenname: givenname104 -description: description104 -userPassword: password104 -mail: uid104 -uidnumber: 104 -gidnumber: 104 -homeDirectory: /home/uid104 - -dn: cn=user105,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user105 -sn: user105 -uid: uid105 -givenname: givenname105 -description: description105 -userPassword: password105 -mail: uid105 -uidnumber: 105 -gidnumber: 105 -homeDirectory: /home/uid105 - -dn: 
cn=user106,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user106 -sn: user106 -uid: uid106 -givenname: givenname106 -description: description106 -userPassword: password106 -mail: uid106 -uidnumber: 106 -gidnumber: 106 -homeDirectory: /home/uid106 - -dn: cn=user107,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user107 -sn: user107 -uid: uid107 -givenname: givenname107 -description: description107 -userPassword: password107 -mail: uid107 -uidnumber: 107 -gidnumber: 107 -homeDirectory: /home/uid107 - -dn: cn=user108,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user108 -sn: user108 -uid: uid108 -givenname: givenname108 -description: description108 -userPassword: password108 -mail: uid108 -uidnumber: 108 -gidnumber: 108 -homeDirectory: /home/uid108 - -dn: cn=user109,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user109 -sn: user109 -uid: uid109 -givenname: givenname109 -description: description109 -userPassword: password109 -mail: uid109 -uidnumber: 109 -gidnumber: 109 -homeDirectory: /home/uid109 - -dn: cn=user110,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user110 -sn: user110 -uid: uid110 -givenname: givenname110 -description: description110 -userPassword: password110 -mail: uid110 -uidnumber: 110 -gidnumber: 110 -homeDirectory: /home/uid110 - -dn: cn=user111,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user111 -sn: user111 -uid: uid111 -givenname: givenname111 -description: description111 -userPassword: password111 -mail: uid111 -uidnumber: 111 -gidnumber: 111 -homeDirectory: /home/uid111 - -dn: cn=user112,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user112 -sn: user112 -uid: uid112 -givenname: givenname112 -description: description112 -userPassword: password112 -mail: uid112 -uidnumber: 112 -gidnumber: 112 -homeDirectory: /home/uid112 - -dn: cn=user113,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user113 -sn: user113 -uid: uid113 -givenname: givenname113 -description: description113 -userPassword: password113 -mail: uid113 -uidnumber: 113 -gidnumber: 113 -homeDirectory: /home/uid113 - -dn: cn=user114,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user114 -sn: user114 -uid: uid114 -givenname: givenname114 -description: description114 -userPassword: password114 -mail: uid114 -uidnumber: 114 -gidnumber: 114 -homeDirectory: /home/uid114 - -dn: cn=user115,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user115 -sn: user115 -uid: uid115 -givenname: 
givenname115 -description: description115 -userPassword: password115 -mail: uid115 -uidnumber: 115 -gidnumber: 115 -homeDirectory: /home/uid115 - -dn: cn=user116,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user116 -sn: user116 -uid: uid116 -givenname: givenname116 -description: description116 -userPassword: password116 -mail: uid116 -uidnumber: 116 -gidnumber: 116 -homeDirectory: /home/uid116 - -dn: cn=user117,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user117 -sn: user117 -uid: uid117 -givenname: givenname117 -description: description117 -userPassword: password117 -mail: uid117 -uidnumber: 117 -gidnumber: 117 -homeDirectory: /home/uid117 - -dn: cn=user118,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user118 -sn: user118 -uid: uid118 -givenname: givenname118 -description: description118 -userPassword: password118 -mail: uid118 -uidnumber: 118 -gidnumber: 118 -homeDirectory: /home/uid118 - -dn: cn=user119,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user119 -sn: user119 -uid: uid119 -givenname: givenname119 -description: description119 -userPassword: password119 -mail: uid119 -uidnumber: 119 -gidnumber: 119 -homeDirectory: /home/uid119 - -dn: cn=user120,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user120 -sn: user120 -uid: uid120 -givenname: givenname120 -description: description120 -userPassword: password120 -mail: uid120 -uidnumber: 120 -gidnumber: 120 -homeDirectory: /home/uid120 - -dn: cn=user121,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user121 -sn: user121 -uid: uid121 -givenname: givenname121 -description: description121 -userPassword: password121 -mail: uid121 -uidnumber: 121 -gidnumber: 121 -homeDirectory: /home/uid121 - -dn: cn=user122,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user122 -sn: user122 -uid: uid122 -givenname: givenname122 -description: description122 -userPassword: password122 -mail: uid122 -uidnumber: 122 -gidnumber: 122 -homeDirectory: /home/uid122 - -dn: cn=user123,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user123 -sn: user123 -uid: uid123 -givenname: givenname123 -description: description123 -userPassword: password123 -mail: uid123 -uidnumber: 123 -gidnumber: 123 -homeDirectory: /home/uid123 - -dn: cn=user124,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user124 -sn: user124 -uid: uid124 -givenname: givenname124 -description: description124 -userPassword: password124 -mail: uid124 -uidnumber: 124 -gidnumber: 124 -homeDirectory: /home/uid124 - -dn: cn=user125,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user125 -sn: user125 -uid: uid125 -givenname: givenname125 -description: description125 -userPassword: password125 -mail: uid125 -uidnumber: 125 -gidnumber: 125 -homeDirectory: /home/uid125 - -dn: cn=user126,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user126 -sn: user126 -uid: uid126 -givenname: givenname126 -description: description126 -userPassword: password126 -mail: uid126 -uidnumber: 126 -gidnumber: 126 -homeDirectory: /home/uid126 - -dn: cn=user127,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user127 -sn: user127 -uid: uid127 -givenname: givenname127 -description: description127 -userPassword: password127 -mail: uid127 -uidnumber: 127 -gidnumber: 127 -homeDirectory: /home/uid127 - -dn: cn=user128,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user128 -sn: user128 -uid: uid128 -givenname: givenname128 -description: description128 -userPassword: password128 -mail: uid128 -uidnumber: 128 -gidnumber: 128 -homeDirectory: /home/uid128 - -dn: cn=user129,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user129 -sn: user129 -uid: uid129 -givenname: givenname129 -description: description129 -userPassword: password129 -mail: uid129 -uidnumber: 129 -gidnumber: 129 -homeDirectory: /home/uid129 - -dn: cn=user130,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user130 -sn: user130 -uid: uid130 -givenname: givenname130 -description: description130 -userPassword: password130 -mail: uid130 -uidnumber: 130 -gidnumber: 130 -homeDirectory: /home/uid130 - -dn: cn=user131,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user131 -sn: user131 -uid: uid131 -givenname: givenname131 -description: description131 -userPassword: password131 -mail: uid131 -uidnumber: 131 -gidnumber: 131 -homeDirectory: /home/uid131 - -dn: cn=user132,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user132 -sn: user132 -uid: uid132 -givenname: givenname132 -description: description132 -userPassword: password132 -mail: uid132 -uidnumber: 132 -gidnumber: 132 -homeDirectory: /home/uid132 - -dn: cn=user133,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user133 -sn: user133 -uid: uid133 -givenname: givenname133 -description: description133 -userPassword: password133 -mail: uid133 -uidnumber: 133 -gidnumber: 133 -homeDirectory: /home/uid133 - -dn: cn=user134,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user134 -sn: user134 -uid: uid134 -givenname: givenname134 -description: description134 -userPassword: password134 -mail: 
uid134 -uidnumber: 134 -gidnumber: 134 -homeDirectory: /home/uid134 - -dn: cn=user135,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user135 -sn: user135 -uid: uid135 -givenname: givenname135 -description: description135 -userPassword: password135 -mail: uid135 -uidnumber: 135 -gidnumber: 135 -homeDirectory: /home/uid135 - -dn: cn=user136,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user136 -sn: user136 -uid: uid136 -givenname: givenname136 -description: description136 -userPassword: password136 -mail: uid136 -uidnumber: 136 -gidnumber: 136 -homeDirectory: /home/uid136 - -dn: cn=user137,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user137 -sn: user137 -uid: uid137 -givenname: givenname137 -description: description137 -userPassword: password137 -mail: uid137 -uidnumber: 137 -gidnumber: 137 -homeDirectory: /home/uid137 - -dn: cn=user138,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user138 -sn: user138 -uid: uid138 -givenname: givenname138 -description: description138 -userPassword: password138 -mail: uid138 -uidnumber: 138 -gidnumber: 138 -homeDirectory: /home/uid138 - -dn: cn=user139,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user139 -sn: user139 -uid: uid139 -givenname: givenname139 -description: description139 -userPassword: password139 -mail: uid139 -uidnumber: 139 -gidnumber: 139 -homeDirectory: /home/uid139 - -dn: cn=user140,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user140 -sn: user140 -uid: uid140 -givenname: givenname140 -description: description140 -userPassword: password140 -mail: uid140 -uidnumber: 140 -gidnumber: 140 -homeDirectory: /home/uid140 - -dn: cn=user141,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user141 -sn: user141 -uid: uid141 -givenname: givenname141 -description: description141 -userPassword: password141 -mail: uid141 -uidnumber: 141 -gidnumber: 141 -homeDirectory: /home/uid141 - -dn: cn=user142,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user142 -sn: user142 -uid: uid142 -givenname: givenname142 -description: description142 -userPassword: password142 -mail: uid142 -uidnumber: 142 -gidnumber: 142 -homeDirectory: /home/uid142 - -dn: cn=user143,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user143 -sn: user143 -uid: uid143 -givenname: givenname143 -description: description143 -userPassword: password143 -mail: uid143 -uidnumber: 143 -gidnumber: 143 -homeDirectory: /home/uid143 - -dn: cn=user144,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user144 -sn: user144 -uid: uid144 -givenname: givenname144 -description: description144 -userPassword: password144 -mail: uid144 -uidnumber: 144 -gidnumber: 144 -homeDirectory: /home/uid144 - -dn: cn=user145,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user145 -sn: user145 -uid: uid145 -givenname: givenname145 -description: description145 -userPassword: password145 -mail: uid145 -uidnumber: 145 -gidnumber: 145 -homeDirectory: /home/uid145 - -dn: cn=user146,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user146 -sn: user146 -uid: uid146 -givenname: givenname146 -description: description146 -userPassword: password146 -mail: uid146 -uidnumber: 146 -gidnumber: 146 -homeDirectory: /home/uid146 - -dn: cn=user147,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user147 -sn: user147 -uid: uid147 -givenname: givenname147 -description: description147 -userPassword: password147 -mail: uid147 -uidnumber: 147 -gidnumber: 147 -homeDirectory: /home/uid147 - -dn: cn=user148,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user148 -sn: user148 -uid: uid148 -givenname: givenname148 -description: description148 -userPassword: password148 -mail: uid148 -uidnumber: 148 -gidnumber: 148 -homeDirectory: /home/uid148 - -dn: cn=user149,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user149 -sn: user149 -uid: uid149 -givenname: givenname149 -description: description149 -userPassword: password149 -mail: uid149 -uidnumber: 149 -gidnumber: 149 -homeDirectory: /home/uid149 - -dn: cn=user150,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user150 -sn: user150 -uid: uid150 -givenname: givenname150 -description: description150 -userPassword: password150 -mail: uid150 -uidnumber: 150 -gidnumber: 150 -homeDirectory: /home/uid150 - -dn: cn=user151,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user151 -sn: user151 -uid: uid151 -givenname: givenname151 -description: description151 -userPassword: password151 -mail: uid151 -uidnumber: 151 -gidnumber: 151 -homeDirectory: /home/uid151 - -dn: cn=user152,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user152 -sn: user152 -uid: uid152 -givenname: givenname152 -description: description152 -userPassword: password152 -mail: uid152 -uidnumber: 152 -gidnumber: 152 -homeDirectory: /home/uid152 - -dn: cn=user153,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user153 -sn: user153 -uid: uid153 -givenname: givenname153 -description: description153 -userPassword: password153 -mail: uid153 -uidnumber: 153 -gidnumber: 153 -homeDirectory: /home/uid153 - 
-dn: cn=user154,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user154 -sn: user154 -uid: uid154 -givenname: givenname154 -description: description154 -userPassword: password154 -mail: uid154 -uidnumber: 154 -gidnumber: 154 -homeDirectory: /home/uid154 - -dn: cn=user155,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user155 -sn: user155 -uid: uid155 -givenname: givenname155 -description: description155 -userPassword: password155 -mail: uid155 -uidnumber: 155 -gidnumber: 155 -homeDirectory: /home/uid155 - -dn: cn=user156,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user156 -sn: user156 -uid: uid156 -givenname: givenname156 -description: description156 -userPassword: password156 -mail: uid156 -uidnumber: 156 -gidnumber: 156 -homeDirectory: /home/uid156 - -dn: cn=user157,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user157 -sn: user157 -uid: uid157 -givenname: givenname157 -description: description157 -userPassword: password157 -mail: uid157 -uidnumber: 157 -gidnumber: 157 -homeDirectory: /home/uid157 - -dn: cn=user158,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user158 -sn: user158 -uid: uid158 -givenname: givenname158 -description: description158 -userPassword: password158 -mail: uid158 -uidnumber: 158 -gidnumber: 158 -homeDirectory: /home/uid158 - -dn: cn=user159,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user159 -sn: user159 -uid: uid159 -givenname: givenname159 -description: description159 -userPassword: password159 -mail: uid159 -uidnumber: 159 -gidnumber: 159 -homeDirectory: /home/uid159 - -dn: cn=user160,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user160 -sn: user160 -uid: uid160 -givenname: givenname160 -description: description160 -userPassword: password160 -mail: uid160 -uidnumber: 160 -gidnumber: 160 -homeDirectory: /home/uid160 - -dn: cn=user161,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user161 -sn: user161 -uid: uid161 -givenname: givenname161 -description: description161 -userPassword: password161 -mail: uid161 -uidnumber: 161 -gidnumber: 161 -homeDirectory: /home/uid161 - -dn: cn=user162,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user162 -sn: user162 -uid: uid162 -givenname: givenname162 -description: description162 -userPassword: password162 -mail: uid162 -uidnumber: 162 -gidnumber: 162 -homeDirectory: /home/uid162 - -dn: cn=user163,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user163 -sn: user163 -uid: uid163 -givenname: 
givenname163 -description: description163 -userPassword: password163 -mail: uid163 -uidnumber: 163 -gidnumber: 163 -homeDirectory: /home/uid163 - -dn: cn=user164,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user164 -sn: user164 -uid: uid164 -givenname: givenname164 -description: description164 -userPassword: password164 -mail: uid164 -uidnumber: 164 -gidnumber: 164 -homeDirectory: /home/uid164 - -dn: cn=user165,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user165 -sn: user165 -uid: uid165 -givenname: givenname165 -description: description165 -userPassword: password165 -mail: uid165 -uidnumber: 165 -gidnumber: 165 -homeDirectory: /home/uid165 - -dn: cn=user166,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user166 -sn: user166 -uid: uid166 -givenname: givenname166 -description: description166 -userPassword: password166 -mail: uid166 -uidnumber: 166 -gidnumber: 166 -homeDirectory: /home/uid166 - -dn: cn=user167,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user167 -sn: user167 -uid: uid167 -givenname: givenname167 -description: description167 -userPassword: password167 -mail: uid167 -uidnumber: 167 -gidnumber: 167 -homeDirectory: /home/uid167 - -dn: cn=user168,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user168 -sn: user168 -uid: uid168 -givenname: givenname168 -description: description168 -userPassword: password168 -mail: uid168 -uidnumber: 168 -gidnumber: 168 -homeDirectory: /home/uid168 - -dn: cn=user169,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user169 -sn: user169 -uid: uid169 -givenname: givenname169 -description: description169 -userPassword: password169 -mail: uid169 -uidnumber: 169 -gidnumber: 169 -homeDirectory: /home/uid169 - -dn: cn=user170,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user170 -sn: user170 -uid: uid170 -givenname: givenname170 -description: description170 -userPassword: password170 -mail: uid170 -uidnumber: 170 -gidnumber: 170 -homeDirectory: /home/uid170 - -dn: cn=user171,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user171 -sn: user171 -uid: uid171 -givenname: givenname171 -description: description171 -userPassword: password171 -mail: uid171 -uidnumber: 171 -gidnumber: 171 -homeDirectory: /home/uid171 - -dn: cn=user172,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user172 -sn: user172 -uid: uid172 -givenname: givenname172 -description: description172 -userPassword: password172 -mail: uid172 -uidnumber: 172 -gidnumber: 172 -homeDirectory: /home/uid172 - -dn: cn=user173,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user173 -sn: user173 -uid: uid173 -givenname: givenname173 -description: description173 -userPassword: password173 -mail: uid173 -uidnumber: 173 -gidnumber: 173 -homeDirectory: /home/uid173 - -dn: cn=user174,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user174 -sn: user174 -uid: uid174 -givenname: givenname174 -description: description174 -userPassword: password174 -mail: uid174 -uidnumber: 174 -gidnumber: 174 -homeDirectory: /home/uid174 - -dn: cn=user175,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user175 -sn: user175 -uid: uid175 -givenname: givenname175 -description: description175 -userPassword: password175 -mail: uid175 -uidnumber: 175 -gidnumber: 175 -homeDirectory: /home/uid175 - -dn: cn=user176,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user176 -sn: user176 -uid: uid176 -givenname: givenname176 -description: description176 -userPassword: password176 -mail: uid176 -uidnumber: 176 -gidnumber: 176 -homeDirectory: /home/uid176 - -dn: cn=user177,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user177 -sn: user177 -uid: uid177 -givenname: givenname177 -description: description177 -userPassword: password177 -mail: uid177 -uidnumber: 177 -gidnumber: 177 -homeDirectory: /home/uid177 - -dn: cn=user178,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user178 -sn: user178 -uid: uid178 -givenname: givenname178 -description: description178 -userPassword: password178 -mail: uid178 -uidnumber: 178 -gidnumber: 178 -homeDirectory: /home/uid178 - -dn: cn=user179,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user179 -sn: user179 -uid: uid179 -givenname: givenname179 -description: description179 -userPassword: password179 -mail: uid179 -uidnumber: 179 -gidnumber: 179 -homeDirectory: /home/uid179 - -dn: cn=user180,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user180 -sn: user180 -uid: uid180 -givenname: givenname180 -description: description180 -userPassword: password180 -mail: uid180 -uidnumber: 180 -gidnumber: 180 -homeDirectory: /home/uid180 - -dn: cn=user181,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user181 -sn: user181 -uid: uid181 -givenname: givenname181 -description: description181 -userPassword: password181 -mail: uid181 -uidnumber: 181 -gidnumber: 181 -homeDirectory: /home/uid181 - -dn: cn=user182,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user182 -sn: user182 -uid: uid182 -givenname: givenname182 -description: description182 -userPassword: password182 -mail: 
uid182 -uidnumber: 182 -gidnumber: 182 -homeDirectory: /home/uid182 - -dn: cn=user183,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user183 -sn: user183 -uid: uid183 -givenname: givenname183 -description: description183 -userPassword: password183 -mail: uid183 -uidnumber: 183 -gidnumber: 183 -homeDirectory: /home/uid183 - -dn: cn=user184,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user184 -sn: user184 -uid: uid184 -givenname: givenname184 -description: description184 -userPassword: password184 -mail: uid184 -uidnumber: 184 -gidnumber: 184 -homeDirectory: /home/uid184 - -dn: cn=user185,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user185 -sn: user185 -uid: uid185 -givenname: givenname185 -description: description185 -userPassword: password185 -mail: uid185 -uidnumber: 185 -gidnumber: 185 -homeDirectory: /home/uid185 - -dn: cn=user186,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user186 -sn: user186 -uid: uid186 -givenname: givenname186 -description: description186 -userPassword: password186 -mail: uid186 -uidnumber: 186 -gidnumber: 186 -homeDirectory: /home/uid186 - -dn: cn=user187,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user187 -sn: user187 -uid: uid187 -givenname: givenname187 -description: description187 -userPassword: password187 -mail: uid187 -uidnumber: 187 -gidnumber: 187 -homeDirectory: /home/uid187 - -dn: cn=user188,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user188 -sn: user188 -uid: uid188 -givenname: givenname188 -description: description188 -userPassword: password188 -mail: uid188 -uidnumber: 188 -gidnumber: 188 -homeDirectory: /home/uid188 - -dn: cn=user189,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user189 -sn: user189 -uid: uid189 -givenname: givenname189 -description: description189 -userPassword: password189 -mail: uid189 -uidnumber: 189 -gidnumber: 189 -homeDirectory: /home/uid189 - -dn: cn=user190,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user190 -sn: user190 -uid: uid190 -givenname: givenname190 -description: description190 -userPassword: password190 -mail: uid190 -uidnumber: 190 -gidnumber: 190 -homeDirectory: /home/uid190 - -dn: cn=user191,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user191 -sn: user191 -uid: uid191 -givenname: givenname191 -description: description191 -userPassword: password191 -mail: uid191 -uidnumber: 191 -gidnumber: 191 -homeDirectory: /home/uid191 - -dn: cn=user192,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user192 -sn: user192 -uid: uid192 -givenname: givenname192 -description: description192 -userPassword: password192 -mail: uid192 -uidnumber: 192 -gidnumber: 192 -homeDirectory: /home/uid192 - -dn: cn=user193,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user193 -sn: user193 -uid: uid193 -givenname: givenname193 -description: description193 -userPassword: password193 -mail: uid193 -uidnumber: 193 -gidnumber: 193 -homeDirectory: /home/uid193 - -dn: cn=user194,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user194 -sn: user194 -uid: uid194 -givenname: givenname194 -description: description194 -userPassword: password194 -mail: uid194 -uidnumber: 194 -gidnumber: 194 -homeDirectory: /home/uid194 - -dn: cn=user195,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user195 -sn: user195 -uid: uid195 -givenname: givenname195 -description: description195 -userPassword: password195 -mail: uid195 -uidnumber: 195 -gidnumber: 195 -homeDirectory: /home/uid195 - -dn: cn=user196,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user196 -sn: user196 -uid: uid196 -givenname: givenname196 -description: description196 -userPassword: password196 -mail: uid196 -uidnumber: 196 -gidnumber: 196 -homeDirectory: /home/uid196 - -dn: cn=user197,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user197 -sn: user197 -uid: uid197 -givenname: givenname197 -description: description197 -userPassword: password197 -mail: uid197 -uidnumber: 197 -gidnumber: 197 -homeDirectory: /home/uid197 - -dn: cn=user198,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user198 -sn: user198 -uid: uid198 -givenname: givenname198 -description: description198 -userPassword: password198 -mail: uid198 -uidnumber: 198 -gidnumber: 198 -homeDirectory: /home/uid198 - -dn: cn=user199,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user199 -sn: user199 -uid: uid199 -givenname: givenname199 -description: description199 -userPassword: password199 -mail: uid199 -uidnumber: 199 -gidnumber: 199 -homeDirectory: /home/uid199 - -dn: cn=user200,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user200 -sn: user200 -uid: uid200 -givenname: givenname200 -description: description200 -userPassword: password200 -mail: uid200 -uidnumber: 200 -gidnumber: 200 -homeDirectory: /home/uid200 - -dn: cn=user201,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user201 -sn: user201 -uid: uid201 -givenname: givenname201 -description: description201 -userPassword: password201 -mail: uid201 -uidnumber: 201 -gidnumber: 201 -homeDirectory: /home/uid201 - 
-dn: cn=user202,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user202 -sn: user202 -uid: uid202 -givenname: givenname202 -description: description202 -userPassword: password202 -mail: uid202 -uidnumber: 202 -gidnumber: 202 -homeDirectory: /home/uid202 - -dn: cn=user203,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user203 -sn: user203 -uid: uid203 -givenname: givenname203 -description: description203 -userPassword: password203 -mail: uid203 -uidnumber: 203 -gidnumber: 203 -homeDirectory: /home/uid203 - -dn: cn=user204,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user204 -sn: user204 -uid: uid204 -givenname: givenname204 -description: description204 -userPassword: password204 -mail: uid204 -uidnumber: 204 -gidnumber: 204 -homeDirectory: /home/uid204 - -dn: cn=user205,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user205 -sn: user205 -uid: uid205 -givenname: givenname205 -description: description205 -userPassword: password205 -mail: uid205 -uidnumber: 205 -gidnumber: 205 -homeDirectory: /home/uid205 - -dn: cn=user206,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user206 -sn: user206 -uid: uid206 -givenname: givenname206 -description: description206 -userPassword: password206 -mail: uid206 -uidnumber: 206 -gidnumber: 206 -homeDirectory: /home/uid206 - -dn: cn=user207,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user207 -sn: user207 -uid: uid207 -givenname: givenname207 -description: description207 -userPassword: password207 -mail: uid207 -uidnumber: 207 -gidnumber: 207 -homeDirectory: /home/uid207 - -dn: cn=user208,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user208 -sn: user208 -uid: uid208 -givenname: givenname208 -description: description208 -userPassword: password208 -mail: uid208 -uidnumber: 208 -gidnumber: 208 -homeDirectory: /home/uid208 - -dn: cn=user209,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user209 -sn: user209 -uid: uid209 -givenname: givenname209 -description: description209 -userPassword: password209 -mail: uid209 -uidnumber: 209 -gidnumber: 209 -homeDirectory: /home/uid209 - -dn: cn=user210,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user210 -sn: user210 -uid: uid210 -givenname: givenname210 -description: description210 -userPassword: password210 -mail: uid210 -uidnumber: 210 -gidnumber: 210 -homeDirectory: /home/uid210 - -dn: cn=user211,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user211 -sn: user211 -uid: uid211 -givenname: 
givenname211 -description: description211 -userPassword: password211 -mail: uid211 -uidnumber: 211 -gidnumber: 211 -homeDirectory: /home/uid211 - -dn: cn=user212,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user212 -sn: user212 -uid: uid212 -givenname: givenname212 -description: description212 -userPassword: password212 -mail: uid212 -uidnumber: 212 -gidnumber: 212 -homeDirectory: /home/uid212 - -dn: cn=user213,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user213 -sn: user213 -uid: uid213 -givenname: givenname213 -description: description213 -userPassword: password213 -mail: uid213 -uidnumber: 213 -gidnumber: 213 -homeDirectory: /home/uid213 - -dn: cn=user214,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user214 -sn: user214 -uid: uid214 -givenname: givenname214 -description: description214 -userPassword: password214 -mail: uid214 -uidnumber: 214 -gidnumber: 214 -homeDirectory: /home/uid214 - -dn: cn=user215,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user215 -sn: user215 -uid: uid215 -givenname: givenname215 -description: description215 -userPassword: password215 -mail: uid215 -uidnumber: 215 -gidnumber: 215 -homeDirectory: /home/uid215 - -dn: cn=user216,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user216 -sn: user216 -uid: uid216 -givenname: givenname216 -description: description216 -userPassword: password216 -mail: uid216 -uidnumber: 216 -gidnumber: 216 -homeDirectory: /home/uid216 - -dn: cn=user217,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user217 -sn: user217 -uid: uid217 -givenname: givenname217 -description: description217 -userPassword: password217 -mail: uid217 -uidnumber: 217 -gidnumber: 217 -homeDirectory: /home/uid217 - -dn: cn=user218,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user218 -sn: user218 -uid: uid218 -givenname: givenname218 -description: description218 -userPassword: password218 -mail: uid218 -uidnumber: 218 -gidnumber: 218 -homeDirectory: /home/uid218 - -dn: cn=user219,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user219 -sn: user219 -uid: uid219 -givenname: givenname219 -description: description219 -userPassword: password219 -mail: uid219 -uidnumber: 219 -gidnumber: 219 -homeDirectory: /home/uid219 - -dn: cn=user220,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user220 -sn: user220 -uid: uid220 -givenname: givenname220 -description: description220 -userPassword: password220 -mail: uid220 -uidnumber: 220 -gidnumber: 220 -homeDirectory: /home/uid220 - -dn: cn=user221,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user221 -sn: user221 -uid: uid221 -givenname: givenname221 -description: description221 -userPassword: password221 -mail: uid221 -uidnumber: 221 -gidnumber: 221 -homeDirectory: /home/uid221 - -dn: cn=user222,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user222 -sn: user222 -uid: uid222 -givenname: givenname222 -description: description222 -userPassword: password222 -mail: uid222 -uidnumber: 222 -gidnumber: 222 -homeDirectory: /home/uid222 - -dn: cn=user223,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user223 -sn: user223 -uid: uid223 -givenname: givenname223 -description: description223 -userPassword: password223 -mail: uid223 -uidnumber: 223 -gidnumber: 223 -homeDirectory: /home/uid223 - -dn: cn=user224,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user224 -sn: user224 -uid: uid224 -givenname: givenname224 -description: description224 -userPassword: password224 -mail: uid224 -uidnumber: 224 -gidnumber: 224 -homeDirectory: /home/uid224 - -dn: cn=user225,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user225 -sn: user225 -uid: uid225 -givenname: givenname225 -description: description225 -userPassword: password225 -mail: uid225 -uidnumber: 225 -gidnumber: 225 -homeDirectory: /home/uid225 - -dn: cn=user226,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user226 -sn: user226 -uid: uid226 -givenname: givenname226 -description: description226 -userPassword: password226 -mail: uid226 -uidnumber: 226 -gidnumber: 226 -homeDirectory: /home/uid226 - -dn: cn=user227,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user227 -sn: user227 -uid: uid227 -givenname: givenname227 -description: description227 -userPassword: password227 -mail: uid227 -uidnumber: 227 -gidnumber: 227 -homeDirectory: /home/uid227 - -dn: cn=user228,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user228 -sn: user228 -uid: uid228 -givenname: givenname228 -description: description228 -userPassword: password228 -mail: uid228 -uidnumber: 228 -gidnumber: 228 -homeDirectory: /home/uid228 - -dn: cn=user229,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user229 -sn: user229 -uid: uid229 -givenname: givenname229 -description: description229 -userPassword: password229 -mail: uid229 -uidnumber: 229 -gidnumber: 229 -homeDirectory: /home/uid229 - -dn: cn=user230,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user230 -sn: user230 -uid: uid230 -givenname: givenname230 -description: description230 -userPassword: password230 -mail: 
uid230 -uidnumber: 230 -gidnumber: 230 -homeDirectory: /home/uid230 - -dn: cn=user231,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user231 -sn: user231 -uid: uid231 -givenname: givenname231 -description: description231 -userPassword: password231 -mail: uid231 -uidnumber: 231 -gidnumber: 231 -homeDirectory: /home/uid231 - -dn: cn=user232,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user232 -sn: user232 -uid: uid232 -givenname: givenname232 -description: description232 -userPassword: password232 -mail: uid232 -uidnumber: 232 -gidnumber: 232 -homeDirectory: /home/uid232 - -dn: cn=user233,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user233 -sn: user233 -uid: uid233 -givenname: givenname233 -description: description233 -userPassword: password233 -mail: uid233 -uidnumber: 233 -gidnumber: 233 -homeDirectory: /home/uid233 - -dn: cn=user234,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user234 -sn: user234 -uid: uid234 -givenname: givenname234 -description: description234 -userPassword: password234 -mail: uid234 -uidnumber: 234 -gidnumber: 234 -homeDirectory: /home/uid234 - -dn: cn=user235,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user235 -sn: user235 -uid: uid235 -givenname: givenname235 -description: description235 -userPassword: password235 -mail: uid235 -uidnumber: 235 -gidnumber: 235 -homeDirectory: /home/uid235 - -dn: cn=user236,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user236 -sn: user236 -uid: uid236 -givenname: givenname236 -description: description236 -userPassword: password236 -mail: uid236 -uidnumber: 236 -gidnumber: 236 -homeDirectory: /home/uid236 - -dn: cn=user237,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user237 -sn: user237 -uid: uid237 -givenname: givenname237 -description: description237 -userPassword: password237 -mail: uid237 -uidnumber: 237 -gidnumber: 237 -homeDirectory: /home/uid237 - -dn: cn=user238,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user238 -sn: user238 -uid: uid238 -givenname: givenname238 -description: description238 -userPassword: password238 -mail: uid238 -uidnumber: 238 -gidnumber: 238 -homeDirectory: /home/uid238 - -dn: cn=user239,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user239 -sn: user239 -uid: uid239 -givenname: givenname239 -description: description239 -userPassword: password239 -mail: uid239 -uidnumber: 239 -gidnumber: 239 -homeDirectory: /home/uid239 - -dn: cn=user240,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user240 -sn: user240 -uid: uid240 -givenname: givenname240 -description: description240 -userPassword: password240 -mail: uid240 -uidnumber: 240 -gidnumber: 240 -homeDirectory: /home/uid240 - -dn: cn=user241,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user241 -sn: user241 -uid: uid241 -givenname: givenname241 -description: description241 -userPassword: password241 -mail: uid241 -uidnumber: 241 -gidnumber: 241 -homeDirectory: /home/uid241 - -dn: cn=user242,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user242 -sn: user242 -uid: uid242 -givenname: givenname242 -description: description242 -userPassword: password242 -mail: uid242 -uidnumber: 242 -gidnumber: 242 -homeDirectory: /home/uid242 - -dn: cn=user243,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user243 -sn: user243 -uid: uid243 -givenname: givenname243 -description: description243 -userPassword: password243 -mail: uid243 -uidnumber: 243 -gidnumber: 243 -homeDirectory: /home/uid243 - -dn: cn=user244,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user244 -sn: user244 -uid: uid244 -givenname: givenname244 -description: description244 -userPassword: password244 -mail: uid244 -uidnumber: 244 -gidnumber: 244 -homeDirectory: /home/uid244 - -dn: cn=user245,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user245 -sn: user245 -uid: uid245 -givenname: givenname245 -description: description245 -userPassword: password245 -mail: uid245 -uidnumber: 245 -gidnumber: 245 -homeDirectory: /home/uid245 - -dn: cn=user246,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user246 -sn: user246 -uid: uid246 -givenname: givenname246 -description: description246 -userPassword: password246 -mail: uid246 -uidnumber: 246 -gidnumber: 246 -homeDirectory: /home/uid246 - -dn: cn=user247,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user247 -sn: user247 -uid: uid247 -givenname: givenname247 -description: description247 -userPassword: password247 -mail: uid247 -uidnumber: 247 -gidnumber: 247 -homeDirectory: /home/uid247 - -dn: cn=user248,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user248 -sn: user248 -uid: uid248 -givenname: givenname248 -description: description248 -userPassword: password248 -mail: uid248 -uidnumber: 248 -gidnumber: 248 -homeDirectory: /home/uid248 - -dn: cn=user249,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user249 -sn: user249 -uid: uid249 -givenname: givenname249 -description: description249 -userPassword: password249 -mail: uid249 -uidnumber: 249 -gidnumber: 249 -homeDirectory: /home/uid249 - 
-dn: cn=user250,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user250 -sn: user250 -uid: uid250 -givenname: givenname250 -description: description250 -userPassword: password250 -mail: uid250 -uidnumber: 250 -gidnumber: 250 -homeDirectory: /home/uid250 - -dn: cn=user251,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user251 -sn: user251 -uid: uid251 -givenname: givenname251 -description: description251 -userPassword: password251 -mail: uid251 -uidnumber: 251 -gidnumber: 251 -homeDirectory: /home/uid251 - -dn: cn=user252,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user252 -sn: user252 -uid: uid252 -givenname: givenname252 -description: description252 -userPassword: password252 -mail: uid252 -uidnumber: 252 -gidnumber: 252 -homeDirectory: /home/uid252 - -dn: cn=user253,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user253 -sn: user253 -uid: uid253 -givenname: givenname253 -description: description253 -userPassword: password253 -mail: uid253 -uidnumber: 253 -gidnumber: 253 -homeDirectory: /home/uid253 - -dn: cn=user254,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user254 -sn: user254 -uid: uid254 -givenname: givenname254 -description: description254 -userPassword: password254 -mail: uid254 -uidnumber: 254 -gidnumber: 254 -homeDirectory: /home/uid254 - -dn: cn=user255,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user255 -sn: user255 -uid: uid255 -givenname: givenname255 -description: description255 -userPassword: password255 -mail: uid255 -uidnumber: 255 -gidnumber: 255 -homeDirectory: /home/uid255 - -dn: cn=user256,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user256 -sn: user256 -uid: uid256 -givenname: givenname256 -description: description256 -userPassword: password256 -mail: uid256 -uidnumber: 256 -gidnumber: 256 -homeDirectory: /home/uid256 - -dn: cn=user257,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user257 -sn: user257 -uid: uid257 -givenname: givenname257 -description: description257 -userPassword: password257 -mail: uid257 -uidnumber: 257 -gidnumber: 257 -homeDirectory: /home/uid257 - -dn: cn=user258,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user258 -sn: user258 -uid: uid258 -givenname: givenname258 -description: description258 -userPassword: password258 -mail: uid258 -uidnumber: 258 -gidnumber: 258 -homeDirectory: /home/uid258 - -dn: cn=user259,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user259 -sn: user259 -uid: uid259 -givenname: 
givenname259 -description: description259 -userPassword: password259 -mail: uid259 -uidnumber: 259 -gidnumber: 259 -homeDirectory: /home/uid259 - -dn: cn=user260,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user260 -sn: user260 -uid: uid260 -givenname: givenname260 -description: description260 -userPassword: password260 -mail: uid260 -uidnumber: 260 -gidnumber: 260 -homeDirectory: /home/uid260 - -dn: cn=user261,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user261 -sn: user261 -uid: uid261 -givenname: givenname261 -description: description261 -userPassword: password261 -mail: uid261 -uidnumber: 261 -gidnumber: 261 -homeDirectory: /home/uid261 - -dn: cn=user262,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user262 -sn: user262 -uid: uid262 -givenname: givenname262 -description: description262 -userPassword: password262 -mail: uid262 -uidnumber: 262 -gidnumber: 262 -homeDirectory: /home/uid262 - -dn: cn=user263,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user263 -sn: user263 -uid: uid263 -givenname: givenname263 -description: description263 -userPassword: password263 -mail: uid263 -uidnumber: 263 -gidnumber: 263 -homeDirectory: /home/uid263 - -dn: cn=user264,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user264 -sn: user264 -uid: uid264 -givenname: givenname264 -description: description264 -userPassword: password264 -mail: uid264 -uidnumber: 264 -gidnumber: 264 -homeDirectory: /home/uid264 - -dn: cn=user265,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user265 -sn: user265 -uid: uid265 -givenname: givenname265 -description: description265 -userPassword: password265 -mail: uid265 -uidnumber: 265 -gidnumber: 265 -homeDirectory: /home/uid265 - -dn: cn=user266,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user266 -sn: user266 -uid: uid266 -givenname: givenname266 -description: description266 -userPassword: password266 -mail: uid266 -uidnumber: 266 -gidnumber: 266 -homeDirectory: /home/uid266 - -dn: cn=user267,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user267 -sn: user267 -uid: uid267 -givenname: givenname267 -description: description267 -userPassword: password267 -mail: uid267 -uidnumber: 267 -gidnumber: 267 -homeDirectory: /home/uid267 - -dn: cn=user268,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user268 -sn: user268 -uid: uid268 -givenname: givenname268 -description: description268 -userPassword: password268 -mail: uid268 -uidnumber: 268 -gidnumber: 268 -homeDirectory: /home/uid268 - -dn: cn=user269,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user269 -sn: user269 -uid: uid269 -givenname: givenname269 -description: description269 -userPassword: password269 -mail: uid269 -uidnumber: 269 -gidnumber: 269 -homeDirectory: /home/uid269 - -dn: cn=user270,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user270 -sn: user270 -uid: uid270 -givenname: givenname270 -description: description270 -userPassword: password270 -mail: uid270 -uidnumber: 270 -gidnumber: 270 -homeDirectory: /home/uid270 - -dn: cn=user271,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user271 -sn: user271 -uid: uid271 -givenname: givenname271 -description: description271 -userPassword: password271 -mail: uid271 -uidnumber: 271 -gidnumber: 271 -homeDirectory: /home/uid271 - -dn: cn=user272,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user272 -sn: user272 -uid: uid272 -givenname: givenname272 -description: description272 -userPassword: password272 -mail: uid272 -uidnumber: 272 -gidnumber: 272 -homeDirectory: /home/uid272 - -dn: cn=user273,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user273 -sn: user273 -uid: uid273 -givenname: givenname273 -description: description273 -userPassword: password273 -mail: uid273 -uidnumber: 273 -gidnumber: 273 -homeDirectory: /home/uid273 - -dn: cn=user274,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user274 -sn: user274 -uid: uid274 -givenname: givenname274 -description: description274 -userPassword: password274 -mail: uid274 -uidnumber: 274 -gidnumber: 274 -homeDirectory: /home/uid274 - -dn: cn=user275,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user275 -sn: user275 -uid: uid275 -givenname: givenname275 -description: description275 -userPassword: password275 -mail: uid275 -uidnumber: 275 -gidnumber: 275 -homeDirectory: /home/uid275 - -dn: cn=user276,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user276 -sn: user276 -uid: uid276 -givenname: givenname276 -description: description276 -userPassword: password276 -mail: uid276 -uidnumber: 276 -gidnumber: 276 -homeDirectory: /home/uid276 - -dn: cn=user277,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user277 -sn: user277 -uid: uid277 -givenname: givenname277 -description: description277 -userPassword: password277 -mail: uid277 -uidnumber: 277 -gidnumber: 277 -homeDirectory: /home/uid277 - -dn: cn=user278,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user278 -sn: user278 -uid: uid278 -givenname: givenname278 -description: description278 -userPassword: password278 -mail: 
uid278 -uidnumber: 278 -gidnumber: 278 -homeDirectory: /home/uid278 - -dn: cn=user279,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user279 -sn: user279 -uid: uid279 -givenname: givenname279 -description: description279 -userPassword: password279 -mail: uid279 -uidnumber: 279 -gidnumber: 279 -homeDirectory: /home/uid279 - -dn: cn=user280,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user280 -sn: user280 -uid: uid280 -givenname: givenname280 -description: description280 -userPassword: password280 -mail: uid280 -uidnumber: 280 -gidnumber: 280 -homeDirectory: /home/uid280 - -dn: cn=user281,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user281 -sn: user281 -uid: uid281 -givenname: givenname281 -description: description281 -userPassword: password281 -mail: uid281 -uidnumber: 281 -gidnumber: 281 -homeDirectory: /home/uid281 - -dn: cn=user282,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user282 -sn: user282 -uid: uid282 -givenname: givenname282 -description: description282 -userPassword: password282 -mail: uid282 -uidnumber: 282 -gidnumber: 282 -homeDirectory: /home/uid282 - -dn: cn=user283,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user283 -sn: user283 -uid: uid283 -givenname: givenname283 -description: description283 -userPassword: password283 -mail: uid283 -uidnumber: 283 -gidnumber: 283 -homeDirectory: /home/uid283 - -dn: cn=user284,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user284 -sn: user284 -uid: uid284 -givenname: givenname284 -description: description284 -userPassword: password284 -mail: uid284 -uidnumber: 284 -gidnumber: 284 -homeDirectory: /home/uid284 - -dn: cn=user285,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user285 -sn: user285 -uid: uid285 -givenname: givenname285 -description: description285 -userPassword: password285 -mail: uid285 -uidnumber: 285 -gidnumber: 285 -homeDirectory: /home/uid285 - -dn: cn=user286,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user286 -sn: user286 -uid: uid286 -givenname: givenname286 -description: description286 -userPassword: password286 -mail: uid286 -uidnumber: 286 -gidnumber: 286 -homeDirectory: /home/uid286 - -dn: cn=user287,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user287 -sn: user287 -uid: uid287 -givenname: givenname287 -description: description287 -userPassword: password287 -mail: uid287 -uidnumber: 287 -gidnumber: 287 -homeDirectory: /home/uid287 - -dn: cn=user288,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user288 -sn: user288 -uid: uid288 -givenname: givenname288 -description: description288 -userPassword: password288 -mail: uid288 -uidnumber: 288 -gidnumber: 288 -homeDirectory: /home/uid288 - -dn: cn=user289,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user289 -sn: user289 -uid: uid289 -givenname: givenname289 -description: description289 -userPassword: password289 -mail: uid289 -uidnumber: 289 -gidnumber: 289 -homeDirectory: /home/uid289 - -dn: cn=user290,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user290 -sn: user290 -uid: uid290 -givenname: givenname290 -description: description290 -userPassword: password290 -mail: uid290 -uidnumber: 290 -gidnumber: 290 -homeDirectory: /home/uid290 - -dn: cn=user291,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user291 -sn: user291 -uid: uid291 -givenname: givenname291 -description: description291 -userPassword: password291 -mail: uid291 -uidnumber: 291 -gidnumber: 291 -homeDirectory: /home/uid291 - -dn: cn=user292,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user292 -sn: user292 -uid: uid292 -givenname: givenname292 -description: description292 -userPassword: password292 -mail: uid292 -uidnumber: 292 -gidnumber: 292 -homeDirectory: /home/uid292 - -dn: cn=user293,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user293 -sn: user293 -uid: uid293 -givenname: givenname293 -description: description293 -userPassword: password293 -mail: uid293 -uidnumber: 293 -gidnumber: 293 -homeDirectory: /home/uid293 - -dn: cn=user294,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user294 -sn: user294 -uid: uid294 -givenname: givenname294 -description: description294 -userPassword: password294 -mail: uid294 -uidnumber: 294 -gidnumber: 294 -homeDirectory: /home/uid294 - -dn: cn=user295,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user295 -sn: user295 -uid: uid295 -givenname: givenname295 -description: description295 -userPassword: password295 -mail: uid295 -uidnumber: 295 -gidnumber: 295 -homeDirectory: /home/uid295 - -dn: cn=user296,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user296 -sn: user296 -uid: uid296 -givenname: givenname296 -description: description296 -userPassword: password296 -mail: uid296 -uidnumber: 296 -gidnumber: 296 -homeDirectory: /home/uid296 - -dn: cn=user297,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user297 -sn: user297 -uid: uid297 -givenname: givenname297 -description: description297 -userPassword: password297 -mail: uid297 -uidnumber: 297 -gidnumber: 297 -homeDirectory: /home/uid297 - 
-dn: cn=user298,ou=People,dc=example,dc=com
-objectClass: top
-objectClass: person
-objectClass: organizationalPerson
-objectClass: inetOrgPerson
-objectClass: posixAccount
-cn: user298
-sn: user298
-uid: uid298
-givenname: givenname298
-description: description298
-userPassword: password298
-mail: uid298
-uidnumber: 298
-gidnumber: 298
-homeDirectory: /home/uid298
-
[... deleted entries user299 through user488 follow the same pattern, differing only in the sequential number used for cn, sn, uid, givenname, description, userPassword, mail, uidnumber, gidnumber, and homeDirectory ...]
-dn: cn=user489,ou=People,dc=example,dc=com
-objectClass: top
-objectClass: person
-objectClass: organizationalPerson
-objectClass: inetOrgPerson
-objectClass: posixAccount
-cn: user489
-sn: user489
-uid: uid489
-givenname: givenname489
-description: description489
-userPassword: password489
-mail: uid489
-uidnumber: 489
-gidnumber: 489
-homeDirectory: /home/uid489
-
-dn: cn=user490,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user490 -sn: user490 -uid: uid490 -givenname: givenname490 -description: description490 -userPassword: password490 -mail: uid490 -uidnumber: 490 -gidnumber: 490 -homeDirectory: /home/uid490 - -dn: cn=user491,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user491 -sn: user491 -uid: uid491 -givenname: givenname491 -description: description491 -userPassword: password491 -mail: uid491 -uidnumber: 491 -gidnumber: 491 -homeDirectory: /home/uid491 - -dn: cn=user492,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user492 -sn: user492 -uid: uid492 -givenname: givenname492 -description: description492 -userPassword: password492 -mail: uid492 -uidnumber: 492 -gidnumber: 492 -homeDirectory: /home/uid492 - -dn: cn=user493,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user493 -sn: user493 -uid: uid493 -givenname: givenname493 -description: description493 -userPassword: password493 -mail: uid493 -uidnumber: 493 -gidnumber: 493 -homeDirectory: /home/uid493 - -dn: cn=user494,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user494 -sn: user494 -uid: uid494 -givenname: givenname494 -description: description494 -userPassword: password494 -mail: uid494 -uidnumber: 494 -gidnumber: 494 -homeDirectory: /home/uid494 - -dn: cn=user495,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user495 -sn: user495 -uid: uid495 -givenname: givenname495 -description: description495 -userPassword: password495 -mail: uid495 -uidnumber: 495 -gidnumber: 495 -homeDirectory: /home/uid495 - -dn: cn=user496,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user496 -sn: user496 -uid: uid496 -givenname: givenname496 -description: description496 -userPassword: password496 -mail: uid496 -uidnumber: 496 -gidnumber: 496 -homeDirectory: /home/uid496 - -dn: cn=user497,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user497 -sn: user497 -uid: uid497 -givenname: givenname497 -description: description497 -userPassword: password497 -mail: uid497 -uidnumber: 497 -gidnumber: 497 -homeDirectory: /home/uid497 - -dn: cn=user498,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user498 -sn: user498 -uid: uid498 -givenname: givenname498 -description: description498 -userPassword: password498 -mail: uid498 -uidnumber: 498 -gidnumber: 498 -homeDirectory: /home/uid498 - -dn: cn=user499,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user499 -sn: user499 -uid: uid499 -givenname: 
givenname499 -description: description499 -userPassword: password499 -mail: uid499 -uidnumber: 499 -gidnumber: 499 -homeDirectory: /home/uid499 - -dn: cn=user500,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user500 -sn: user500 -uid: uid500 -givenname: givenname500 -description: description500 -userPassword: password500 -mail: uid500 -uidnumber: 500 -gidnumber: 500 -homeDirectory: /home/uid500 - -dn: cn=user501,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user501 -sn: user501 -uid: uid501 -givenname: givenname501 -description: description501 -userPassword: password501 -mail: uid501 -uidnumber: 501 -gidnumber: 501 -homeDirectory: /home/uid501 - -dn: cn=user502,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user502 -sn: user502 -uid: uid502 -givenname: givenname502 -description: description502 -userPassword: password502 -mail: uid502 -uidnumber: 502 -gidnumber: 502 -homeDirectory: /home/uid502 - -dn: cn=user503,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user503 -sn: user503 -uid: uid503 -givenname: givenname503 -description: description503 -userPassword: password503 -mail: uid503 -uidnumber: 503 -gidnumber: 503 -homeDirectory: /home/uid503 - -dn: cn=user504,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user504 -sn: user504 -uid: uid504 -givenname: givenname504 -description: description504 -userPassword: password504 -mail: uid504 -uidnumber: 504 -gidnumber: 504 -homeDirectory: /home/uid504 - -dn: cn=user505,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user505 -sn: user505 -uid: uid505 -givenname: givenname505 -description: description505 -userPassword: password505 -mail: uid505 -uidnumber: 505 -gidnumber: 505 -homeDirectory: /home/uid505 - -dn: cn=user506,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user506 -sn: user506 -uid: uid506 -givenname: givenname506 -description: description506 -userPassword: password506 -mail: uid506 -uidnumber: 506 -gidnumber: 506 -homeDirectory: /home/uid506 - -dn: cn=user507,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user507 -sn: user507 -uid: uid507 -givenname: givenname507 -description: description507 -userPassword: password507 -mail: uid507 -uidnumber: 507 -gidnumber: 507 -homeDirectory: /home/uid507 - -dn: cn=user508,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user508 -sn: user508 -uid: uid508 -givenname: givenname508 -description: description508 -userPassword: password508 -mail: uid508 -uidnumber: 508 -gidnumber: 508 -homeDirectory: /home/uid508 - -dn: cn=user509,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user509 -sn: user509 -uid: uid509 -givenname: givenname509 -description: description509 -userPassword: password509 -mail: uid509 -uidnumber: 509 -gidnumber: 509 -homeDirectory: /home/uid509 - -dn: cn=user510,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user510 -sn: user510 -uid: uid510 -givenname: givenname510 -description: description510 -userPassword: password510 -mail: uid510 -uidnumber: 510 -gidnumber: 510 -homeDirectory: /home/uid510 - -dn: cn=user511,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user511 -sn: user511 -uid: uid511 -givenname: givenname511 -description: description511 -userPassword: password511 -mail: uid511 -uidnumber: 511 -gidnumber: 511 -homeDirectory: /home/uid511 - -dn: cn=user512,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user512 -sn: user512 -uid: uid512 -givenname: givenname512 -description: description512 -userPassword: password512 -mail: uid512 -uidnumber: 512 -gidnumber: 512 -homeDirectory: /home/uid512 - -dn: cn=user513,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user513 -sn: user513 -uid: uid513 -givenname: givenname513 -description: description513 -userPassword: password513 -mail: uid513 -uidnumber: 513 -gidnumber: 513 -homeDirectory: /home/uid513 - -dn: cn=user514,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user514 -sn: user514 -uid: uid514 -givenname: givenname514 -description: description514 -userPassword: password514 -mail: uid514 -uidnumber: 514 -gidnumber: 514 -homeDirectory: /home/uid514 - -dn: cn=user515,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user515 -sn: user515 -uid: uid515 -givenname: givenname515 -description: description515 -userPassword: password515 -mail: uid515 -uidnumber: 515 -gidnumber: 515 -homeDirectory: /home/uid515 - -dn: cn=user516,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user516 -sn: user516 -uid: uid516 -givenname: givenname516 -description: description516 -userPassword: password516 -mail: uid516 -uidnumber: 516 -gidnumber: 516 -homeDirectory: /home/uid516 - -dn: cn=user517,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user517 -sn: user517 -uid: uid517 -givenname: givenname517 -description: description517 -userPassword: password517 -mail: uid517 -uidnumber: 517 -gidnumber: 517 -homeDirectory: /home/uid517 - -dn: cn=user518,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user518 -sn: user518 -uid: uid518 -givenname: givenname518 -description: description518 -userPassword: password518 -mail: 
uid518 -uidnumber: 518 -gidnumber: 518 -homeDirectory: /home/uid518 - -dn: cn=user519,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user519 -sn: user519 -uid: uid519 -givenname: givenname519 -description: description519 -userPassword: password519 -mail: uid519 -uidnumber: 519 -gidnumber: 519 -homeDirectory: /home/uid519 - -dn: cn=user520,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user520 -sn: user520 -uid: uid520 -givenname: givenname520 -description: description520 -userPassword: password520 -mail: uid520 -uidnumber: 520 -gidnumber: 520 -homeDirectory: /home/uid520 - -dn: cn=user521,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user521 -sn: user521 -uid: uid521 -givenname: givenname521 -description: description521 -userPassword: password521 -mail: uid521 -uidnumber: 521 -gidnumber: 521 -homeDirectory: /home/uid521 - -dn: cn=user522,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user522 -sn: user522 -uid: uid522 -givenname: givenname522 -description: description522 -userPassword: password522 -mail: uid522 -uidnumber: 522 -gidnumber: 522 -homeDirectory: /home/uid522 - -dn: cn=user523,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user523 -sn: user523 -uid: uid523 -givenname: givenname523 -description: description523 -userPassword: password523 -mail: uid523 -uidnumber: 523 -gidnumber: 523 -homeDirectory: /home/uid523 - -dn: cn=user524,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user524 -sn: user524 -uid: uid524 -givenname: givenname524 -description: description524 -userPassword: password524 -mail: uid524 -uidnumber: 524 -gidnumber: 524 -homeDirectory: /home/uid524 - -dn: cn=user525,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user525 -sn: user525 -uid: uid525 -givenname: givenname525 -description: description525 -userPassword: password525 -mail: uid525 -uidnumber: 525 -gidnumber: 525 -homeDirectory: /home/uid525 - -dn: cn=user526,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user526 -sn: user526 -uid: uid526 -givenname: givenname526 -description: description526 -userPassword: password526 -mail: uid526 -uidnumber: 526 -gidnumber: 526 -homeDirectory: /home/uid526 - -dn: cn=user527,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user527 -sn: user527 -uid: uid527 -givenname: givenname527 -description: description527 -userPassword: password527 -mail: uid527 -uidnumber: 527 -gidnumber: 527 -homeDirectory: /home/uid527 - -dn: cn=user528,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user528 -sn: user528 -uid: uid528 -givenname: givenname528 -description: description528 -userPassword: password528 -mail: uid528 -uidnumber: 528 -gidnumber: 528 -homeDirectory: /home/uid528 - -dn: cn=user529,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user529 -sn: user529 -uid: uid529 -givenname: givenname529 -description: description529 -userPassword: password529 -mail: uid529 -uidnumber: 529 -gidnumber: 529 -homeDirectory: /home/uid529 - -dn: cn=user530,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user530 -sn: user530 -uid: uid530 -givenname: givenname530 -description: description530 -userPassword: password530 -mail: uid530 -uidnumber: 530 -gidnumber: 530 -homeDirectory: /home/uid530 - -dn: cn=user531,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user531 -sn: user531 -uid: uid531 -givenname: givenname531 -description: description531 -userPassword: password531 -mail: uid531 -uidnumber: 531 -gidnumber: 531 -homeDirectory: /home/uid531 - -dn: cn=user532,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user532 -sn: user532 -uid: uid532 -givenname: givenname532 -description: description532 -userPassword: password532 -mail: uid532 -uidnumber: 532 -gidnumber: 532 -homeDirectory: /home/uid532 - -dn: cn=user533,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user533 -sn: user533 -uid: uid533 -givenname: givenname533 -description: description533 -userPassword: password533 -mail: uid533 -uidnumber: 533 -gidnumber: 533 -homeDirectory: /home/uid533 - -dn: cn=user534,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user534 -sn: user534 -uid: uid534 -givenname: givenname534 -description: description534 -userPassword: password534 -mail: uid534 -uidnumber: 534 -gidnumber: 534 -homeDirectory: /home/uid534 - -dn: cn=user535,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user535 -sn: user535 -uid: uid535 -givenname: givenname535 -description: description535 -userPassword: password535 -mail: uid535 -uidnumber: 535 -gidnumber: 535 -homeDirectory: /home/uid535 - -dn: cn=user536,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user536 -sn: user536 -uid: uid536 -givenname: givenname536 -description: description536 -userPassword: password536 -mail: uid536 -uidnumber: 536 -gidnumber: 536 -homeDirectory: /home/uid536 - -dn: cn=user537,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user537 -sn: user537 -uid: uid537 -givenname: givenname537 -description: description537 -userPassword: password537 -mail: uid537 -uidnumber: 537 -gidnumber: 537 -homeDirectory: /home/uid537 - 
-dn: cn=user538,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user538 -sn: user538 -uid: uid538 -givenname: givenname538 -description: description538 -userPassword: password538 -mail: uid538 -uidnumber: 538 -gidnumber: 538 -homeDirectory: /home/uid538 - -dn: cn=user539,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user539 -sn: user539 -uid: uid539 -givenname: givenname539 -description: description539 -userPassword: password539 -mail: uid539 -uidnumber: 539 -gidnumber: 539 -homeDirectory: /home/uid539 - -dn: cn=user540,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user540 -sn: user540 -uid: uid540 -givenname: givenname540 -description: description540 -userPassword: password540 -mail: uid540 -uidnumber: 540 -gidnumber: 540 -homeDirectory: /home/uid540 - -dn: cn=user541,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user541 -sn: user541 -uid: uid541 -givenname: givenname541 -description: description541 -userPassword: password541 -mail: uid541 -uidnumber: 541 -gidnumber: 541 -homeDirectory: /home/uid541 - -dn: cn=user542,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user542 -sn: user542 -uid: uid542 -givenname: givenname542 -description: description542 -userPassword: password542 -mail: uid542 -uidnumber: 542 -gidnumber: 542 -homeDirectory: /home/uid542 - -dn: cn=user543,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user543 -sn: user543 -uid: uid543 -givenname: givenname543 -description: description543 -userPassword: password543 -mail: uid543 -uidnumber: 543 -gidnumber: 543 -homeDirectory: /home/uid543 - -dn: cn=user544,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user544 -sn: user544 -uid: uid544 -givenname: givenname544 -description: description544 -userPassword: password544 -mail: uid544 -uidnumber: 544 -gidnumber: 544 -homeDirectory: /home/uid544 - -dn: cn=user545,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user545 -sn: user545 -uid: uid545 -givenname: givenname545 -description: description545 -userPassword: password545 -mail: uid545 -uidnumber: 545 -gidnumber: 545 -homeDirectory: /home/uid545 - -dn: cn=user546,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user546 -sn: user546 -uid: uid546 -givenname: givenname546 -description: description546 -userPassword: password546 -mail: uid546 -uidnumber: 546 -gidnumber: 546 -homeDirectory: /home/uid546 - -dn: cn=user547,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user547 -sn: user547 -uid: uid547 -givenname: 
givenname547 -description: description547 -userPassword: password547 -mail: uid547 -uidnumber: 547 -gidnumber: 547 -homeDirectory: /home/uid547 - -dn: cn=user548,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user548 -sn: user548 -uid: uid548 -givenname: givenname548 -description: description548 -userPassword: password548 -mail: uid548 -uidnumber: 548 -gidnumber: 548 -homeDirectory: /home/uid548 - -dn: cn=user549,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user549 -sn: user549 -uid: uid549 -givenname: givenname549 -description: description549 -userPassword: password549 -mail: uid549 -uidnumber: 549 -gidnumber: 549 -homeDirectory: /home/uid549 - -dn: cn=user550,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user550 -sn: user550 -uid: uid550 -givenname: givenname550 -description: description550 -userPassword: password550 -mail: uid550 -uidnumber: 550 -gidnumber: 550 -homeDirectory: /home/uid550 - -dn: cn=user551,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user551 -sn: user551 -uid: uid551 -givenname: givenname551 -description: description551 -userPassword: password551 -mail: uid551 -uidnumber: 551 -gidnumber: 551 -homeDirectory: /home/uid551 - -dn: cn=user552,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user552 -sn: user552 -uid: uid552 -givenname: givenname552 -description: description552 -userPassword: password552 -mail: uid552 -uidnumber: 552 -gidnumber: 552 -homeDirectory: /home/uid552 - -dn: cn=user553,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user553 -sn: user553 -uid: uid553 -givenname: givenname553 -description: description553 -userPassword: password553 -mail: uid553 -uidnumber: 553 -gidnumber: 553 -homeDirectory: /home/uid553 - -dn: cn=user554,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user554 -sn: user554 -uid: uid554 -givenname: givenname554 -description: description554 -userPassword: password554 -mail: uid554 -uidnumber: 554 -gidnumber: 554 -homeDirectory: /home/uid554 - -dn: cn=user555,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user555 -sn: user555 -uid: uid555 -givenname: givenname555 -description: description555 -userPassword: password555 -mail: uid555 -uidnumber: 555 -gidnumber: 555 -homeDirectory: /home/uid555 - -dn: cn=user556,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user556 -sn: user556 -uid: uid556 -givenname: givenname556 -description: description556 -userPassword: password556 -mail: uid556 -uidnumber: 556 -gidnumber: 556 -homeDirectory: /home/uid556 - -dn: cn=user557,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user557 -sn: user557 -uid: uid557 -givenname: givenname557 -description: description557 -userPassword: password557 -mail: uid557 -uidnumber: 557 -gidnumber: 557 -homeDirectory: /home/uid557 - -dn: cn=user558,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user558 -sn: user558 -uid: uid558 -givenname: givenname558 -description: description558 -userPassword: password558 -mail: uid558 -uidnumber: 558 -gidnumber: 558 -homeDirectory: /home/uid558 - -dn: cn=user559,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user559 -sn: user559 -uid: uid559 -givenname: givenname559 -description: description559 -userPassword: password559 -mail: uid559 -uidnumber: 559 -gidnumber: 559 -homeDirectory: /home/uid559 - -dn: cn=user560,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user560 -sn: user560 -uid: uid560 -givenname: givenname560 -description: description560 -userPassword: password560 -mail: uid560 -uidnumber: 560 -gidnumber: 560 -homeDirectory: /home/uid560 - -dn: cn=user561,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user561 -sn: user561 -uid: uid561 -givenname: givenname561 -description: description561 -userPassword: password561 -mail: uid561 -uidnumber: 561 -gidnumber: 561 -homeDirectory: /home/uid561 - -dn: cn=user562,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user562 -sn: user562 -uid: uid562 -givenname: givenname562 -description: description562 -userPassword: password562 -mail: uid562 -uidnumber: 562 -gidnumber: 562 -homeDirectory: /home/uid562 - -dn: cn=user563,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user563 -sn: user563 -uid: uid563 -givenname: givenname563 -description: description563 -userPassword: password563 -mail: uid563 -uidnumber: 563 -gidnumber: 563 -homeDirectory: /home/uid563 - -dn: cn=user564,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user564 -sn: user564 -uid: uid564 -givenname: givenname564 -description: description564 -userPassword: password564 -mail: uid564 -uidnumber: 564 -gidnumber: 564 -homeDirectory: /home/uid564 - -dn: cn=user565,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user565 -sn: user565 -uid: uid565 -givenname: givenname565 -description: description565 -userPassword: password565 -mail: uid565 -uidnumber: 565 -gidnumber: 565 -homeDirectory: /home/uid565 - -dn: cn=user566,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user566 -sn: user566 -uid: uid566 -givenname: givenname566 -description: description566 -userPassword: password566 -mail: 
uid566 -uidnumber: 566 -gidnumber: 566 -homeDirectory: /home/uid566 - -dn: cn=user567,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user567 -sn: user567 -uid: uid567 -givenname: givenname567 -description: description567 -userPassword: password567 -mail: uid567 -uidnumber: 567 -gidnumber: 567 -homeDirectory: /home/uid567 - -dn: cn=user568,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user568 -sn: user568 -uid: uid568 -givenname: givenname568 -description: description568 -userPassword: password568 -mail: uid568 -uidnumber: 568 -gidnumber: 568 -homeDirectory: /home/uid568 - -dn: cn=user569,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user569 -sn: user569 -uid: uid569 -givenname: givenname569 -description: description569 -userPassword: password569 -mail: uid569 -uidnumber: 569 -gidnumber: 569 -homeDirectory: /home/uid569 - -dn: cn=user570,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user570 -sn: user570 -uid: uid570 -givenname: givenname570 -description: description570 -userPassword: password570 -mail: uid570 -uidnumber: 570 -gidnumber: 570 -homeDirectory: /home/uid570 - -dn: cn=user571,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user571 -sn: user571 -uid: uid571 -givenname: givenname571 -description: description571 -userPassword: password571 -mail: uid571 -uidnumber: 571 -gidnumber: 571 -homeDirectory: /home/uid571 - -dn: cn=user572,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user572 -sn: user572 -uid: uid572 -givenname: givenname572 -description: description572 -userPassword: password572 -mail: uid572 -uidnumber: 572 -gidnumber: 572 -homeDirectory: /home/uid572 - -dn: cn=user573,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user573 -sn: user573 -uid: uid573 -givenname: givenname573 -description: description573 -userPassword: password573 -mail: uid573 -uidnumber: 573 -gidnumber: 573 -homeDirectory: /home/uid573 - -dn: cn=user574,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user574 -sn: user574 -uid: uid574 -givenname: givenname574 -description: description574 -userPassword: password574 -mail: uid574 -uidnumber: 574 -gidnumber: 574 -homeDirectory: /home/uid574 - -dn: cn=user575,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user575 -sn: user575 -uid: uid575 -givenname: givenname575 -description: description575 -userPassword: password575 -mail: uid575 -uidnumber: 575 -gidnumber: 575 -homeDirectory: /home/uid575 - -dn: cn=user576,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user576 -sn: user576 -uid: uid576 -givenname: givenname576 -description: description576 -userPassword: password576 -mail: uid576 -uidnumber: 576 -gidnumber: 576 -homeDirectory: /home/uid576 - -dn: cn=user577,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user577 -sn: user577 -uid: uid577 -givenname: givenname577 -description: description577 -userPassword: password577 -mail: uid577 -uidnumber: 577 -gidnumber: 577 -homeDirectory: /home/uid577 - -dn: cn=user578,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user578 -sn: user578 -uid: uid578 -givenname: givenname578 -description: description578 -userPassword: password578 -mail: uid578 -uidnumber: 578 -gidnumber: 578 -homeDirectory: /home/uid578 - -dn: cn=user579,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user579 -sn: user579 -uid: uid579 -givenname: givenname579 -description: description579 -userPassword: password579 -mail: uid579 -uidnumber: 579 -gidnumber: 579 -homeDirectory: /home/uid579 - -dn: cn=user580,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user580 -sn: user580 -uid: uid580 -givenname: givenname580 -description: description580 -userPassword: password580 -mail: uid580 -uidnumber: 580 -gidnumber: 580 -homeDirectory: /home/uid580 - -dn: cn=user581,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user581 -sn: user581 -uid: uid581 -givenname: givenname581 -description: description581 -userPassword: password581 -mail: uid581 -uidnumber: 581 -gidnumber: 581 -homeDirectory: /home/uid581 - -dn: cn=user582,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user582 -sn: user582 -uid: uid582 -givenname: givenname582 -description: description582 -userPassword: password582 -mail: uid582 -uidnumber: 582 -gidnumber: 582 -homeDirectory: /home/uid582 - -dn: cn=user583,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user583 -sn: user583 -uid: uid583 -givenname: givenname583 -description: description583 -userPassword: password583 -mail: uid583 -uidnumber: 583 -gidnumber: 583 -homeDirectory: /home/uid583 - -dn: cn=user584,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user584 -sn: user584 -uid: uid584 -givenname: givenname584 -description: description584 -userPassword: password584 -mail: uid584 -uidnumber: 584 -gidnumber: 584 -homeDirectory: /home/uid584 - -dn: cn=user585,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user585 -sn: user585 -uid: uid585 -givenname: givenname585 -description: description585 -userPassword: password585 -mail: uid585 -uidnumber: 585 -gidnumber: 585 -homeDirectory: /home/uid585 - 
-dn: cn=user586,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user586 -sn: user586 -uid: uid586 -givenname: givenname586 -description: description586 -userPassword: password586 -mail: uid586 -uidnumber: 586 -gidnumber: 586 -homeDirectory: /home/uid586 - -dn: cn=user587,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user587 -sn: user587 -uid: uid587 -givenname: givenname587 -description: description587 -userPassword: password587 -mail: uid587 -uidnumber: 587 -gidnumber: 587 -homeDirectory: /home/uid587 - -dn: cn=user588,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user588 -sn: user588 -uid: uid588 -givenname: givenname588 -description: description588 -userPassword: password588 -mail: uid588 -uidnumber: 588 -gidnumber: 588 -homeDirectory: /home/uid588 - -dn: cn=user589,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user589 -sn: user589 -uid: uid589 -givenname: givenname589 -description: description589 -userPassword: password589 -mail: uid589 -uidnumber: 589 -gidnumber: 589 -homeDirectory: /home/uid589 - -dn: cn=user590,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user590 -sn: user590 -uid: uid590 -givenname: givenname590 -description: description590 -userPassword: password590 -mail: uid590 -uidnumber: 590 -gidnumber: 590 -homeDirectory: /home/uid590 - -dn: cn=user591,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user591 -sn: user591 -uid: uid591 -givenname: givenname591 -description: description591 -userPassword: password591 -mail: uid591 -uidnumber: 591 -gidnumber: 591 -homeDirectory: /home/uid591 - -dn: cn=user592,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user592 -sn: user592 -uid: uid592 -givenname: givenname592 -description: description592 -userPassword: password592 -mail: uid592 -uidnumber: 592 -gidnumber: 592 -homeDirectory: /home/uid592 - -dn: cn=user593,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user593 -sn: user593 -uid: uid593 -givenname: givenname593 -description: description593 -userPassword: password593 -mail: uid593 -uidnumber: 593 -gidnumber: 593 -homeDirectory: /home/uid593 - -dn: cn=user594,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user594 -sn: user594 -uid: uid594 -givenname: givenname594 -description: description594 -userPassword: password594 -mail: uid594 -uidnumber: 594 -gidnumber: 594 -homeDirectory: /home/uid594 - -dn: cn=user595,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user595 -sn: user595 -uid: uid595 -givenname: 
givenname595 -description: description595 -userPassword: password595 -mail: uid595 -uidnumber: 595 -gidnumber: 595 -homeDirectory: /home/uid595 - -dn: cn=user596,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user596 -sn: user596 -uid: uid596 -givenname: givenname596 -description: description596 -userPassword: password596 -mail: uid596 -uidnumber: 596 -gidnumber: 596 -homeDirectory: /home/uid596 - -dn: cn=user597,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user597 -sn: user597 -uid: uid597 -givenname: givenname597 -description: description597 -userPassword: password597 -mail: uid597 -uidnumber: 597 -gidnumber: 597 -homeDirectory: /home/uid597 - -dn: cn=user598,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user598 -sn: user598 -uid: uid598 -givenname: givenname598 -description: description598 -userPassword: password598 -mail: uid598 -uidnumber: 598 -gidnumber: 598 -homeDirectory: /home/uid598 - -dn: cn=user599,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user599 -sn: user599 -uid: uid599 -givenname: givenname599 -description: description599 -userPassword: password599 -mail: uid599 -uidnumber: 599 -gidnumber: 599 -homeDirectory: /home/uid599 - -dn: cn=user600,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user600 -sn: user600 -uid: uid600 -givenname: givenname600 -description: description600 -userPassword: password600 -mail: uid600 -uidnumber: 600 -gidnumber: 600 -homeDirectory: /home/uid600 - -dn: cn=user601,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user601 -sn: user601 -uid: uid601 -givenname: givenname601 -description: description601 -userPassword: password601 -mail: uid601 -uidnumber: 601 -gidnumber: 601 -homeDirectory: /home/uid601 - -dn: cn=user602,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user602 -sn: user602 -uid: uid602 -givenname: givenname602 -description: description602 -userPassword: password602 -mail: uid602 -uidnumber: 602 -gidnumber: 602 -homeDirectory: /home/uid602 - -dn: cn=user603,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user603 -sn: user603 -uid: uid603 -givenname: givenname603 -description: description603 -userPassword: password603 -mail: uid603 -uidnumber: 603 -gidnumber: 603 -homeDirectory: /home/uid603 - -dn: cn=user604,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user604 -sn: user604 -uid: uid604 -givenname: givenname604 -description: description604 -userPassword: password604 -mail: uid604 -uidnumber: 604 -gidnumber: 604 -homeDirectory: /home/uid604 - -dn: cn=user605,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user605 -sn: user605 -uid: uid605 -givenname: givenname605 -description: description605 -userPassword: password605 -mail: uid605 -uidnumber: 605 -gidnumber: 605 -homeDirectory: /home/uid605 - -dn: cn=user606,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user606 -sn: user606 -uid: uid606 -givenname: givenname606 -description: description606 -userPassword: password606 -mail: uid606 -uidnumber: 606 -gidnumber: 606 -homeDirectory: /home/uid606 - -dn: cn=user607,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user607 -sn: user607 -uid: uid607 -givenname: givenname607 -description: description607 -userPassword: password607 -mail: uid607 -uidnumber: 607 -gidnumber: 607 -homeDirectory: /home/uid607 - -dn: cn=user608,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user608 -sn: user608 -uid: uid608 -givenname: givenname608 -description: description608 -userPassword: password608 -mail: uid608 -uidnumber: 608 -gidnumber: 608 -homeDirectory: /home/uid608 - -dn: cn=user609,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user609 -sn: user609 -uid: uid609 -givenname: givenname609 -description: description609 -userPassword: password609 -mail: uid609 -uidnumber: 609 -gidnumber: 609 -homeDirectory: /home/uid609 - -dn: cn=user610,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user610 -sn: user610 -uid: uid610 -givenname: givenname610 -description: description610 -userPassword: password610 -mail: uid610 -uidnumber: 610 -gidnumber: 610 -homeDirectory: /home/uid610 - -dn: cn=user611,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user611 -sn: user611 -uid: uid611 -givenname: givenname611 -description: description611 -userPassword: password611 -mail: uid611 -uidnumber: 611 -gidnumber: 611 -homeDirectory: /home/uid611 - -dn: cn=user612,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user612 -sn: user612 -uid: uid612 -givenname: givenname612 -description: description612 -userPassword: password612 -mail: uid612 -uidnumber: 612 -gidnumber: 612 -homeDirectory: /home/uid612 - -dn: cn=user613,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user613 -sn: user613 -uid: uid613 -givenname: givenname613 -description: description613 -userPassword: password613 -mail: uid613 -uidnumber: 613 -gidnumber: 613 -homeDirectory: /home/uid613 - -dn: cn=user614,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user614 -sn: user614 -uid: uid614 -givenname: givenname614 -description: description614 -userPassword: password614 -mail: 
uid614 -uidnumber: 614 -gidnumber: 614 -homeDirectory: /home/uid614 - -dn: cn=user615,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user615 -sn: user615 -uid: uid615 -givenname: givenname615 -description: description615 -userPassword: password615 -mail: uid615 -uidnumber: 615 -gidnumber: 615 -homeDirectory: /home/uid615 - -dn: cn=user616,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user616 -sn: user616 -uid: uid616 -givenname: givenname616 -description: description616 -userPassword: password616 -mail: uid616 -uidnumber: 616 -gidnumber: 616 -homeDirectory: /home/uid616 - -dn: cn=user617,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user617 -sn: user617 -uid: uid617 -givenname: givenname617 -description: description617 -userPassword: password617 -mail: uid617 -uidnumber: 617 -gidnumber: 617 -homeDirectory: /home/uid617 - -dn: cn=user618,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user618 -sn: user618 -uid: uid618 -givenname: givenname618 -description: description618 -userPassword: password618 -mail: uid618 -uidnumber: 618 -gidnumber: 618 -homeDirectory: /home/uid618 - -dn: cn=user619,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user619 -sn: user619 -uid: uid619 -givenname: givenname619 -description: description619 -userPassword: password619 -mail: uid619 -uidnumber: 619 -gidnumber: 619 -homeDirectory: /home/uid619 - -dn: cn=user620,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user620 -sn: user620 -uid: uid620 -givenname: givenname620 -description: description620 -userPassword: password620 -mail: uid620 -uidnumber: 620 -gidnumber: 620 -homeDirectory: /home/uid620 - -dn: cn=user621,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user621 -sn: user621 -uid: uid621 -givenname: givenname621 -description: description621 -userPassword: password621 -mail: uid621 -uidnumber: 621 -gidnumber: 621 -homeDirectory: /home/uid621 - -dn: cn=user622,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user622 -sn: user622 -uid: uid622 -givenname: givenname622 -description: description622 -userPassword: password622 -mail: uid622 -uidnumber: 622 -gidnumber: 622 -homeDirectory: /home/uid622 - -dn: cn=user623,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user623 -sn: user623 -uid: uid623 -givenname: givenname623 -description: description623 -userPassword: password623 -mail: uid623 -uidnumber: 623 -gidnumber: 623 -homeDirectory: /home/uid623 - -dn: cn=user624,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user624 -sn: user624 -uid: uid624 -givenname: givenname624 -description: description624 -userPassword: password624 -mail: uid624 -uidnumber: 624 -gidnumber: 624 -homeDirectory: /home/uid624 - -dn: cn=user625,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user625 -sn: user625 -uid: uid625 -givenname: givenname625 -description: description625 -userPassword: password625 -mail: uid625 -uidnumber: 625 -gidnumber: 625 -homeDirectory: /home/uid625 - -dn: cn=user626,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user626 -sn: user626 -uid: uid626 -givenname: givenname626 -description: description626 -userPassword: password626 -mail: uid626 -uidnumber: 626 -gidnumber: 626 -homeDirectory: /home/uid626 - -dn: cn=user627,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user627 -sn: user627 -uid: uid627 -givenname: givenname627 -description: description627 -userPassword: password627 -mail: uid627 -uidnumber: 627 -gidnumber: 627 -homeDirectory: /home/uid627 - -dn: cn=user628,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user628 -sn: user628 -uid: uid628 -givenname: givenname628 -description: description628 -userPassword: password628 -mail: uid628 -uidnumber: 628 -gidnumber: 628 -homeDirectory: /home/uid628 - -dn: cn=user629,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user629 -sn: user629 -uid: uid629 -givenname: givenname629 -description: description629 -userPassword: password629 -mail: uid629 -uidnumber: 629 -gidnumber: 629 -homeDirectory: /home/uid629 - -dn: cn=user630,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user630 -sn: user630 -uid: uid630 -givenname: givenname630 -description: description630 -userPassword: password630 -mail: uid630 -uidnumber: 630 -gidnumber: 630 -homeDirectory: /home/uid630 - -dn: cn=user631,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user631 -sn: user631 -uid: uid631 -givenname: givenname631 -description: description631 -userPassword: password631 -mail: uid631 -uidnumber: 631 -gidnumber: 631 -homeDirectory: /home/uid631 - -dn: cn=user632,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user632 -sn: user632 -uid: uid632 -givenname: givenname632 -description: description632 -userPassword: password632 -mail: uid632 -uidnumber: 632 -gidnumber: 632 -homeDirectory: /home/uid632 - -dn: cn=user633,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user633 -sn: user633 -uid: uid633 -givenname: givenname633 -description: description633 -userPassword: password633 -mail: uid633 -uidnumber: 633 -gidnumber: 633 -homeDirectory: /home/uid633 - 
-dn: cn=user634,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user634 -sn: user634 -uid: uid634 -givenname: givenname634 -description: description634 -userPassword: password634 -mail: uid634 -uidnumber: 634 -gidnumber: 634 -homeDirectory: /home/uid634 - -dn: cn=user635,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user635 -sn: user635 -uid: uid635 -givenname: givenname635 -description: description635 -userPassword: password635 -mail: uid635 -uidnumber: 635 -gidnumber: 635 -homeDirectory: /home/uid635 - -dn: cn=user636,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user636 -sn: user636 -uid: uid636 -givenname: givenname636 -description: description636 -userPassword: password636 -mail: uid636 -uidnumber: 636 -gidnumber: 636 -homeDirectory: /home/uid636 - -dn: cn=user637,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user637 -sn: user637 -uid: uid637 -givenname: givenname637 -description: description637 -userPassword: password637 -mail: uid637 -uidnumber: 637 -gidnumber: 637 -homeDirectory: /home/uid637 - -dn: cn=user638,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user638 -sn: user638 -uid: uid638 -givenname: givenname638 -description: description638 -userPassword: password638 -mail: uid638 -uidnumber: 638 -gidnumber: 638 -homeDirectory: /home/uid638 - -dn: cn=user639,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user639 -sn: user639 -uid: uid639 -givenname: givenname639 -description: description639 -userPassword: password639 -mail: uid639 -uidnumber: 639 -gidnumber: 639 -homeDirectory: /home/uid639 - -dn: cn=user640,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user640 -sn: user640 -uid: uid640 -givenname: givenname640 -description: description640 -userPassword: password640 -mail: uid640 -uidnumber: 640 -gidnumber: 640 -homeDirectory: /home/uid640 - -dn: cn=user641,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user641 -sn: user641 -uid: uid641 -givenname: givenname641 -description: description641 -userPassword: password641 -mail: uid641 -uidnumber: 641 -gidnumber: 641 -homeDirectory: /home/uid641 - -dn: cn=user642,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user642 -sn: user642 -uid: uid642 -givenname: givenname642 -description: description642 -userPassword: password642 -mail: uid642 -uidnumber: 642 -gidnumber: 642 -homeDirectory: /home/uid642 - -dn: cn=user643,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user643 -sn: user643 -uid: uid643 -givenname: 
givenname643 -description: description643 -userPassword: password643 -mail: uid643 -uidnumber: 643 -gidnumber: 643 -homeDirectory: /home/uid643 - -dn: cn=user644,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user644 -sn: user644 -uid: uid644 -givenname: givenname644 -description: description644 -userPassword: password644 -mail: uid644 -uidnumber: 644 -gidnumber: 644 -homeDirectory: /home/uid644 - -dn: cn=user645,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user645 -sn: user645 -uid: uid645 -givenname: givenname645 -description: description645 -userPassword: password645 -mail: uid645 -uidnumber: 645 -gidnumber: 645 -homeDirectory: /home/uid645 - -dn: cn=user646,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user646 -sn: user646 -uid: uid646 -givenname: givenname646 -description: description646 -userPassword: password646 -mail: uid646 -uidnumber: 646 -gidnumber: 646 -homeDirectory: /home/uid646 - -dn: cn=user647,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user647 -sn: user647 -uid: uid647 -givenname: givenname647 -description: description647 -userPassword: password647 -mail: uid647 -uidnumber: 647 -gidnumber: 647 -homeDirectory: /home/uid647 - -dn: cn=user648,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user648 -sn: user648 -uid: uid648 -givenname: givenname648 -description: description648 -userPassword: password648 -mail: uid648 -uidnumber: 648 -gidnumber: 648 -homeDirectory: /home/uid648 - -dn: cn=user649,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user649 -sn: user649 -uid: uid649 -givenname: givenname649 -description: description649 -userPassword: password649 -mail: uid649 -uidnumber: 649 -gidnumber: 649 -homeDirectory: /home/uid649 - -dn: cn=user650,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user650 -sn: user650 -uid: uid650 -givenname: givenname650 -description: description650 -userPassword: password650 -mail: uid650 -uidnumber: 650 -gidnumber: 650 -homeDirectory: /home/uid650 - -dn: cn=user651,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user651 -sn: user651 -uid: uid651 -givenname: givenname651 -description: description651 -userPassword: password651 -mail: uid651 -uidnumber: 651 -gidnumber: 651 -homeDirectory: /home/uid651 - -dn: cn=user652,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user652 -sn: user652 -uid: uid652 -givenname: givenname652 -description: description652 -userPassword: password652 -mail: uid652 -uidnumber: 652 -gidnumber: 652 -homeDirectory: /home/uid652 - -dn: cn=user653,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user653 -sn: user653 -uid: uid653 -givenname: givenname653 -description: description653 -userPassword: password653 -mail: uid653 -uidnumber: 653 -gidnumber: 653 -homeDirectory: /home/uid653 - -dn: cn=user654,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user654 -sn: user654 -uid: uid654 -givenname: givenname654 -description: description654 -userPassword: password654 -mail: uid654 -uidnumber: 654 -gidnumber: 654 -homeDirectory: /home/uid654 - -dn: cn=user655,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user655 -sn: user655 -uid: uid655 -givenname: givenname655 -description: description655 -userPassword: password655 -mail: uid655 -uidnumber: 655 -gidnumber: 655 -homeDirectory: /home/uid655 - -dn: cn=user656,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user656 -sn: user656 -uid: uid656 -givenname: givenname656 -description: description656 -userPassword: password656 -mail: uid656 -uidnumber: 656 -gidnumber: 656 -homeDirectory: /home/uid656 - -dn: cn=user657,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user657 -sn: user657 -uid: uid657 -givenname: givenname657 -description: description657 -userPassword: password657 -mail: uid657 -uidnumber: 657 -gidnumber: 657 -homeDirectory: /home/uid657 - -dn: cn=user658,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user658 -sn: user658 -uid: uid658 -givenname: givenname658 -description: description658 -userPassword: password658 -mail: uid658 -uidnumber: 658 -gidnumber: 658 -homeDirectory: /home/uid658 - -dn: cn=user659,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user659 -sn: user659 -uid: uid659 -givenname: givenname659 -description: description659 -userPassword: password659 -mail: uid659 -uidnumber: 659 -gidnumber: 659 -homeDirectory: /home/uid659 - -dn: cn=user660,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user660 -sn: user660 -uid: uid660 -givenname: givenname660 -description: description660 -userPassword: password660 -mail: uid660 -uidnumber: 660 -gidnumber: 660 -homeDirectory: /home/uid660 - -dn: cn=user661,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user661 -sn: user661 -uid: uid661 -givenname: givenname661 -description: description661 -userPassword: password661 -mail: uid661 -uidnumber: 661 -gidnumber: 661 -homeDirectory: /home/uid661 - -dn: cn=user662,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user662 -sn: user662 -uid: uid662 -givenname: givenname662 -description: description662 -userPassword: password662 -mail: 
uid662 -uidnumber: 662 -gidnumber: 662 -homeDirectory: /home/uid662 - -dn: cn=user663,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user663 -sn: user663 -uid: uid663 -givenname: givenname663 -description: description663 -userPassword: password663 -mail: uid663 -uidnumber: 663 -gidnumber: 663 -homeDirectory: /home/uid663 - -dn: cn=user664,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user664 -sn: user664 -uid: uid664 -givenname: givenname664 -description: description664 -userPassword: password664 -mail: uid664 -uidnumber: 664 -gidnumber: 664 -homeDirectory: /home/uid664 - -dn: cn=user665,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user665 -sn: user665 -uid: uid665 -givenname: givenname665 -description: description665 -userPassword: password665 -mail: uid665 -uidnumber: 665 -gidnumber: 665 -homeDirectory: /home/uid665 - -dn: cn=user666,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user666 -sn: user666 -uid: uid666 -givenname: givenname666 -description: description666 -userPassword: password666 -mail: uid666 -uidnumber: 666 -gidnumber: 666 -homeDirectory: /home/uid666 - -dn: cn=user667,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user667 -sn: user667 -uid: uid667 -givenname: givenname667 -description: description667 -userPassword: password667 -mail: uid667 -uidnumber: 667 -gidnumber: 667 -homeDirectory: /home/uid667 - -dn: cn=user668,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user668 -sn: user668 -uid: uid668 -givenname: givenname668 -description: description668 -userPassword: password668 -mail: uid668 -uidnumber: 668 -gidnumber: 668 -homeDirectory: /home/uid668 - -dn: cn=user669,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user669 -sn: user669 -uid: uid669 -givenname: givenname669 -description: description669 -userPassword: password669 -mail: uid669 -uidnumber: 669 -gidnumber: 669 -homeDirectory: /home/uid669 - -dn: cn=user670,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user670 -sn: user670 -uid: uid670 -givenname: givenname670 -description: description670 -userPassword: password670 -mail: uid670 -uidnumber: 670 -gidnumber: 670 -homeDirectory: /home/uid670 - -dn: cn=user671,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user671 -sn: user671 -uid: uid671 -givenname: givenname671 -description: description671 -userPassword: password671 -mail: uid671 -uidnumber: 671 -gidnumber: 671 -homeDirectory: /home/uid671 - -dn: cn=user672,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user672 -sn: user672 -uid: uid672 -givenname: givenname672 -description: description672 -userPassword: password672 -mail: uid672 -uidnumber: 672 -gidnumber: 672 -homeDirectory: /home/uid672 - -dn: cn=user673,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user673 -sn: user673 -uid: uid673 -givenname: givenname673 -description: description673 -userPassword: password673 -mail: uid673 -uidnumber: 673 -gidnumber: 673 -homeDirectory: /home/uid673 - -dn: cn=user674,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user674 -sn: user674 -uid: uid674 -givenname: givenname674 -description: description674 -userPassword: password674 -mail: uid674 -uidnumber: 674 -gidnumber: 674 -homeDirectory: /home/uid674 - -dn: cn=user675,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user675 -sn: user675 -uid: uid675 -givenname: givenname675 -description: description675 -userPassword: password675 -mail: uid675 -uidnumber: 675 -gidnumber: 675 -homeDirectory: /home/uid675 - -dn: cn=user676,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user676 -sn: user676 -uid: uid676 -givenname: givenname676 -description: description676 -userPassword: password676 -mail: uid676 -uidnumber: 676 -gidnumber: 676 -homeDirectory: /home/uid676 - -dn: cn=user677,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user677 -sn: user677 -uid: uid677 -givenname: givenname677 -description: description677 -userPassword: password677 -mail: uid677 -uidnumber: 677 -gidnumber: 677 -homeDirectory: /home/uid677 - -dn: cn=user678,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user678 -sn: user678 -uid: uid678 -givenname: givenname678 -description: description678 -userPassword: password678 -mail: uid678 -uidnumber: 678 -gidnumber: 678 -homeDirectory: /home/uid678 - -dn: cn=user679,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user679 -sn: user679 -uid: uid679 -givenname: givenname679 -description: description679 -userPassword: password679 -mail: uid679 -uidnumber: 679 -gidnumber: 679 -homeDirectory: /home/uid679 - -dn: cn=user680,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user680 -sn: user680 -uid: uid680 -givenname: givenname680 -description: description680 -userPassword: password680 -mail: uid680 -uidnumber: 680 -gidnumber: 680 -homeDirectory: /home/uid680 - -dn: cn=user681,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user681 -sn: user681 -uid: uid681 -givenname: givenname681 -description: description681 -userPassword: password681 -mail: uid681 -uidnumber: 681 -gidnumber: 681 -homeDirectory: /home/uid681 - 
-dn: cn=user682,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user682 -sn: user682 -uid: uid682 -givenname: givenname682 -description: description682 -userPassword: password682 -mail: uid682 -uidnumber: 682 -gidnumber: 682 -homeDirectory: /home/uid682 - -dn: cn=user683,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user683 -sn: user683 -uid: uid683 -givenname: givenname683 -description: description683 -userPassword: password683 -mail: uid683 -uidnumber: 683 -gidnumber: 683 -homeDirectory: /home/uid683 - -dn: cn=user684,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user684 -sn: user684 -uid: uid684 -givenname: givenname684 -description: description684 -userPassword: password684 -mail: uid684 -uidnumber: 684 -gidnumber: 684 -homeDirectory: /home/uid684 - -dn: cn=user685,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user685 -sn: user685 -uid: uid685 -givenname: givenname685 -description: description685 -userPassword: password685 -mail: uid685 -uidnumber: 685 -gidnumber: 685 -homeDirectory: /home/uid685 - -dn: cn=user686,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user686 -sn: user686 -uid: uid686 -givenname: givenname686 -description: description686 -userPassword: password686 -mail: uid686 -uidnumber: 686 -gidnumber: 686 -homeDirectory: /home/uid686 - -dn: cn=user687,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user687 -sn: user687 -uid: uid687 -givenname: givenname687 -description: description687 -userPassword: password687 -mail: uid687 -uidnumber: 687 -gidnumber: 687 -homeDirectory: /home/uid687 - -dn: cn=user688,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user688 -sn: user688 -uid: uid688 -givenname: givenname688 -description: description688 -userPassword: password688 -mail: uid688 -uidnumber: 688 -gidnumber: 688 -homeDirectory: /home/uid688 - -dn: cn=user689,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user689 -sn: user689 -uid: uid689 -givenname: givenname689 -description: description689 -userPassword: password689 -mail: uid689 -uidnumber: 689 -gidnumber: 689 -homeDirectory: /home/uid689 - -dn: cn=user690,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user690 -sn: user690 -uid: uid690 -givenname: givenname690 -description: description690 -userPassword: password690 -mail: uid690 -uidnumber: 690 -gidnumber: 690 -homeDirectory: /home/uid690 - -dn: cn=user691,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user691 -sn: user691 -uid: uid691 -givenname: 
givenname691 -description: description691 -userPassword: password691 -mail: uid691 -uidnumber: 691 -gidnumber: 691 -homeDirectory: /home/uid691 - -dn: cn=user692,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user692 -sn: user692 -uid: uid692 -givenname: givenname692 -description: description692 -userPassword: password692 -mail: uid692 -uidnumber: 692 -gidnumber: 692 -homeDirectory: /home/uid692 - -dn: cn=user693,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user693 -sn: user693 -uid: uid693 -givenname: givenname693 -description: description693 -userPassword: password693 -mail: uid693 -uidnumber: 693 -gidnumber: 693 -homeDirectory: /home/uid693 - -dn: cn=user694,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user694 -sn: user694 -uid: uid694 -givenname: givenname694 -description: description694 -userPassword: password694 -mail: uid694 -uidnumber: 694 -gidnumber: 694 -homeDirectory: /home/uid694 - -dn: cn=user695,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user695 -sn: user695 -uid: uid695 -givenname: givenname695 -description: description695 -userPassword: password695 -mail: uid695 -uidnumber: 695 -gidnumber: 695 -homeDirectory: /home/uid695 - -dn: cn=user696,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user696 -sn: user696 -uid: uid696 -givenname: givenname696 -description: description696 -userPassword: password696 -mail: uid696 -uidnumber: 696 -gidnumber: 696 -homeDirectory: /home/uid696 - -dn: cn=user697,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user697 -sn: user697 -uid: uid697 -givenname: givenname697 -description: description697 -userPassword: password697 -mail: uid697 -uidnumber: 697 -gidnumber: 697 -homeDirectory: /home/uid697 - -dn: cn=user698,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user698 -sn: user698 -uid: uid698 -givenname: givenname698 -description: description698 -userPassword: password698 -mail: uid698 -uidnumber: 698 -gidnumber: 698 -homeDirectory: /home/uid698 - -dn: cn=user699,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user699 -sn: user699 -uid: uid699 -givenname: givenname699 -description: description699 -userPassword: password699 -mail: uid699 -uidnumber: 699 -gidnumber: 699 -homeDirectory: /home/uid699 - -dn: cn=user700,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user700 -sn: user700 -uid: uid700 -givenname: givenname700 -description: description700 -userPassword: password700 -mail: uid700 -uidnumber: 700 -gidnumber: 700 -homeDirectory: /home/uid700 - -dn: cn=user701,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user701 -sn: user701 -uid: uid701 -givenname: givenname701 -description: description701 -userPassword: password701 -mail: uid701 -uidnumber: 701 -gidnumber: 701 -homeDirectory: /home/uid701 - -dn: cn=user702,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user702 -sn: user702 -uid: uid702 -givenname: givenname702 -description: description702 -userPassword: password702 -mail: uid702 -uidnumber: 702 -gidnumber: 702 -homeDirectory: /home/uid702 - -dn: cn=user703,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user703 -sn: user703 -uid: uid703 -givenname: givenname703 -description: description703 -userPassword: password703 -mail: uid703 -uidnumber: 703 -gidnumber: 703 -homeDirectory: /home/uid703 - -dn: cn=user704,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user704 -sn: user704 -uid: uid704 -givenname: givenname704 -description: description704 -userPassword: password704 -mail: uid704 -uidnumber: 704 -gidnumber: 704 -homeDirectory: /home/uid704 - -dn: cn=user705,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user705 -sn: user705 -uid: uid705 -givenname: givenname705 -description: description705 -userPassword: password705 -mail: uid705 -uidnumber: 705 -gidnumber: 705 -homeDirectory: /home/uid705 - -dn: cn=user706,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user706 -sn: user706 -uid: uid706 -givenname: givenname706 -description: description706 -userPassword: password706 -mail: uid706 -uidnumber: 706 -gidnumber: 706 -homeDirectory: /home/uid706 - -dn: cn=user707,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user707 -sn: user707 -uid: uid707 -givenname: givenname707 -description: description707 -userPassword: password707 -mail: uid707 -uidnumber: 707 -gidnumber: 707 -homeDirectory: /home/uid707 - -dn: cn=user708,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user708 -sn: user708 -uid: uid708 -givenname: givenname708 -description: description708 -userPassword: password708 -mail: uid708 -uidnumber: 708 -gidnumber: 708 -homeDirectory: /home/uid708 - -dn: cn=user709,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user709 -sn: user709 -uid: uid709 -givenname: givenname709 -description: description709 -userPassword: password709 -mail: uid709 -uidnumber: 709 -gidnumber: 709 -homeDirectory: /home/uid709 - -dn: cn=user710,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user710 -sn: user710 -uid: uid710 -givenname: givenname710 -description: description710 -userPassword: password710 -mail: 
uid710 -uidnumber: 710 -gidnumber: 710 -homeDirectory: /home/uid710 - -dn: cn=user711,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user711 -sn: user711 -uid: uid711 -givenname: givenname711 -description: description711 -userPassword: password711 -mail: uid711 -uidnumber: 711 -gidnumber: 711 -homeDirectory: /home/uid711 - -dn: cn=user712,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user712 -sn: user712 -uid: uid712 -givenname: givenname712 -description: description712 -userPassword: password712 -mail: uid712 -uidnumber: 712 -gidnumber: 712 -homeDirectory: /home/uid712 - -dn: cn=user713,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user713 -sn: user713 -uid: uid713 -givenname: givenname713 -description: description713 -userPassword: password713 -mail: uid713 -uidnumber: 713 -gidnumber: 713 -homeDirectory: /home/uid713 - -dn: cn=user714,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user714 -sn: user714 -uid: uid714 -givenname: givenname714 -description: description714 -userPassword: password714 -mail: uid714 -uidnumber: 714 -gidnumber: 714 -homeDirectory: /home/uid714 - -dn: cn=user715,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user715 -sn: user715 -uid: uid715 -givenname: givenname715 -description: description715 -userPassword: password715 -mail: uid715 -uidnumber: 715 -gidnumber: 715 -homeDirectory: /home/uid715 - -dn: cn=user716,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user716 -sn: user716 -uid: uid716 -givenname: givenname716 -description: description716 -userPassword: password716 -mail: uid716 -uidnumber: 716 -gidnumber: 716 -homeDirectory: /home/uid716 - -dn: cn=user717,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user717 -sn: user717 -uid: uid717 -givenname: givenname717 -description: description717 -userPassword: password717 -mail: uid717 -uidnumber: 717 -gidnumber: 717 -homeDirectory: /home/uid717 - -dn: cn=user718,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user718 -sn: user718 -uid: uid718 -givenname: givenname718 -description: description718 -userPassword: password718 -mail: uid718 -uidnumber: 718 -gidnumber: 718 -homeDirectory: /home/uid718 - -dn: cn=user719,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user719 -sn: user719 -uid: uid719 -givenname: givenname719 -description: description719 -userPassword: password719 -mail: uid719 -uidnumber: 719 -gidnumber: 719 -homeDirectory: /home/uid719 - -dn: cn=user720,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user720 -sn: user720 -uid: uid720 -givenname: givenname720 -description: description720 -userPassword: password720 -mail: uid720 -uidnumber: 720 -gidnumber: 720 -homeDirectory: /home/uid720 - -dn: cn=user721,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user721 -sn: user721 -uid: uid721 -givenname: givenname721 -description: description721 -userPassword: password721 -mail: uid721 -uidnumber: 721 -gidnumber: 721 -homeDirectory: /home/uid721 - -dn: cn=user722,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user722 -sn: user722 -uid: uid722 -givenname: givenname722 -description: description722 -userPassword: password722 -mail: uid722 -uidnumber: 722 -gidnumber: 722 -homeDirectory: /home/uid722 - -dn: cn=user723,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user723 -sn: user723 -uid: uid723 -givenname: givenname723 -description: description723 -userPassword: password723 -mail: uid723 -uidnumber: 723 -gidnumber: 723 -homeDirectory: /home/uid723 - -dn: cn=user724,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user724 -sn: user724 -uid: uid724 -givenname: givenname724 -description: description724 -userPassword: password724 -mail: uid724 -uidnumber: 724 -gidnumber: 724 -homeDirectory: /home/uid724 - -dn: cn=user725,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user725 -sn: user725 -uid: uid725 -givenname: givenname725 -description: description725 -userPassword: password725 -mail: uid725 -uidnumber: 725 -gidnumber: 725 -homeDirectory: /home/uid725 - -dn: cn=user726,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user726 -sn: user726 -uid: uid726 -givenname: givenname726 -description: description726 -userPassword: password726 -mail: uid726 -uidnumber: 726 -gidnumber: 726 -homeDirectory: /home/uid726 - -dn: cn=user727,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user727 -sn: user727 -uid: uid727 -givenname: givenname727 -description: description727 -userPassword: password727 -mail: uid727 -uidnumber: 727 -gidnumber: 727 -homeDirectory: /home/uid727 - -dn: cn=user728,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user728 -sn: user728 -uid: uid728 -givenname: givenname728 -description: description728 -userPassword: password728 -mail: uid728 -uidnumber: 728 -gidnumber: 728 -homeDirectory: /home/uid728 - -dn: cn=user729,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user729 -sn: user729 -uid: uid729 -givenname: givenname729 -description: description729 -userPassword: password729 -mail: uid729 -uidnumber: 729 -gidnumber: 729 -homeDirectory: /home/uid729 - 
-dn: cn=user730,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user730 -sn: user730 -uid: uid730 -givenname: givenname730 -description: description730 -userPassword: password730 -mail: uid730 -uidnumber: 730 -gidnumber: 730 -homeDirectory: /home/uid730 - -dn: cn=user731,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user731 -sn: user731 -uid: uid731 -givenname: givenname731 -description: description731 -userPassword: password731 -mail: uid731 -uidnumber: 731 -gidnumber: 731 -homeDirectory: /home/uid731 - -dn: cn=user732,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user732 -sn: user732 -uid: uid732 -givenname: givenname732 -description: description732 -userPassword: password732 -mail: uid732 -uidnumber: 732 -gidnumber: 732 -homeDirectory: /home/uid732 - -dn: cn=user733,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user733 -sn: user733 -uid: uid733 -givenname: givenname733 -description: description733 -userPassword: password733 -mail: uid733 -uidnumber: 733 -gidnumber: 733 -homeDirectory: /home/uid733 - -dn: cn=user734,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user734 -sn: user734 -uid: uid734 -givenname: givenname734 -description: description734 -userPassword: password734 -mail: uid734 -uidnumber: 734 -gidnumber: 734 -homeDirectory: /home/uid734 - -dn: cn=user735,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user735 -sn: user735 -uid: uid735 -givenname: givenname735 -description: description735 -userPassword: password735 -mail: uid735 -uidnumber: 735 -gidnumber: 735 -homeDirectory: /home/uid735 - -dn: cn=user736,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user736 -sn: user736 -uid: uid736 -givenname: givenname736 -description: description736 -userPassword: password736 -mail: uid736 -uidnumber: 736 -gidnumber: 736 -homeDirectory: /home/uid736 - -dn: cn=user737,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user737 -sn: user737 -uid: uid737 -givenname: givenname737 -description: description737 -userPassword: password737 -mail: uid737 -uidnumber: 737 -gidnumber: 737 -homeDirectory: /home/uid737 - -dn: cn=user738,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user738 -sn: user738 -uid: uid738 -givenname: givenname738 -description: description738 -userPassword: password738 -mail: uid738 -uidnumber: 738 -gidnumber: 738 -homeDirectory: /home/uid738 - -dn: cn=user739,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user739 -sn: user739 -uid: uid739 -givenname: 
givenname739 -description: description739 -userPassword: password739 -mail: uid739 -uidnumber: 739 -gidnumber: 739 -homeDirectory: /home/uid739 - -dn: cn=user740,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user740 -sn: user740 -uid: uid740 -givenname: givenname740 -description: description740 -userPassword: password740 -mail: uid740 -uidnumber: 740 -gidnumber: 740 -homeDirectory: /home/uid740 - -dn: cn=user741,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user741 -sn: user741 -uid: uid741 -givenname: givenname741 -description: description741 -userPassword: password741 -mail: uid741 -uidnumber: 741 -gidnumber: 741 -homeDirectory: /home/uid741 - -dn: cn=user742,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user742 -sn: user742 -uid: uid742 -givenname: givenname742 -description: description742 -userPassword: password742 -mail: uid742 -uidnumber: 742 -gidnumber: 742 -homeDirectory: /home/uid742 - -dn: cn=user743,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user743 -sn: user743 -uid: uid743 -givenname: givenname743 -description: description743 -userPassword: password743 -mail: uid743 -uidnumber: 743 -gidnumber: 743 -homeDirectory: /home/uid743 - -dn: cn=user744,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user744 -sn: user744 -uid: uid744 -givenname: givenname744 -description: description744 -userPassword: password744 -mail: uid744 -uidnumber: 744 -gidnumber: 744 -homeDirectory: /home/uid744 - -dn: cn=user745,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user745 -sn: user745 -uid: uid745 -givenname: givenname745 -description: description745 -userPassword: password745 -mail: uid745 -uidnumber: 745 -gidnumber: 745 -homeDirectory: /home/uid745 - -dn: cn=user746,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user746 -sn: user746 -uid: uid746 -givenname: givenname746 -description: description746 -userPassword: password746 -mail: uid746 -uidnumber: 746 -gidnumber: 746 -homeDirectory: /home/uid746 - -dn: cn=user747,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user747 -sn: user747 -uid: uid747 -givenname: givenname747 -description: description747 -userPassword: password747 -mail: uid747 -uidnumber: 747 -gidnumber: 747 -homeDirectory: /home/uid747 - -dn: cn=user748,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user748 -sn: user748 -uid: uid748 -givenname: givenname748 -description: description748 -userPassword: password748 -mail: uid748 -uidnumber: 748 -gidnumber: 748 -homeDirectory: /home/uid748 - -dn: cn=user749,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user749 -sn: user749 -uid: uid749 -givenname: givenname749 -description: description749 -userPassword: password749 -mail: uid749 -uidnumber: 749 -gidnumber: 749 -homeDirectory: /home/uid749 - -dn: cn=user750,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user750 -sn: user750 -uid: uid750 -givenname: givenname750 -description: description750 -userPassword: password750 -mail: uid750 -uidnumber: 750 -gidnumber: 750 -homeDirectory: /home/uid750 - -dn: cn=user751,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user751 -sn: user751 -uid: uid751 -givenname: givenname751 -description: description751 -userPassword: password751 -mail: uid751 -uidnumber: 751 -gidnumber: 751 -homeDirectory: /home/uid751 - -dn: cn=user752,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user752 -sn: user752 -uid: uid752 -givenname: givenname752 -description: description752 -userPassword: password752 -mail: uid752 -uidnumber: 752 -gidnumber: 752 -homeDirectory: /home/uid752 - -dn: cn=user753,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user753 -sn: user753 -uid: uid753 -givenname: givenname753 -description: description753 -userPassword: password753 -mail: uid753 -uidnumber: 753 -gidnumber: 753 -homeDirectory: /home/uid753 - -dn: cn=user754,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user754 -sn: user754 -uid: uid754 -givenname: givenname754 -description: description754 -userPassword: password754 -mail: uid754 -uidnumber: 754 -gidnumber: 754 -homeDirectory: /home/uid754 - -dn: cn=user755,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user755 -sn: user755 -uid: uid755 -givenname: givenname755 -description: description755 -userPassword: password755 -mail: uid755 -uidnumber: 755 -gidnumber: 755 -homeDirectory: /home/uid755 - -dn: cn=user756,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user756 -sn: user756 -uid: uid756 -givenname: givenname756 -description: description756 -userPassword: password756 -mail: uid756 -uidnumber: 756 -gidnumber: 756 -homeDirectory: /home/uid756 - -dn: cn=user757,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user757 -sn: user757 -uid: uid757 -givenname: givenname757 -description: description757 -userPassword: password757 -mail: uid757 -uidnumber: 757 -gidnumber: 757 -homeDirectory: /home/uid757 - -dn: cn=user758,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user758 -sn: user758 -uid: uid758 -givenname: givenname758 -description: description758 -userPassword: password758 -mail: 
uid758 -uidnumber: 758 -gidnumber: 758 -homeDirectory: /home/uid758 - -dn: cn=user759,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user759 -sn: user759 -uid: uid759 -givenname: givenname759 -description: description759 -userPassword: password759 -mail: uid759 -uidnumber: 759 -gidnumber: 759 -homeDirectory: /home/uid759 - -dn: cn=user760,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user760 -sn: user760 -uid: uid760 -givenname: givenname760 -description: description760 -userPassword: password760 -mail: uid760 -uidnumber: 760 -gidnumber: 760 -homeDirectory: /home/uid760 - -dn: cn=user761,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user761 -sn: user761 -uid: uid761 -givenname: givenname761 -description: description761 -userPassword: password761 -mail: uid761 -uidnumber: 761 -gidnumber: 761 -homeDirectory: /home/uid761 - -dn: cn=user762,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user762 -sn: user762 -uid: uid762 -givenname: givenname762 -description: description762 -userPassword: password762 -mail: uid762 -uidnumber: 762 -gidnumber: 762 -homeDirectory: /home/uid762 - -dn: cn=user763,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user763 -sn: user763 -uid: uid763 -givenname: givenname763 -description: description763 -userPassword: password763 -mail: uid763 -uidnumber: 763 -gidnumber: 763 -homeDirectory: /home/uid763 - -dn: cn=user764,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user764 -sn: user764 -uid: uid764 -givenname: givenname764 -description: description764 -userPassword: password764 -mail: uid764 -uidnumber: 764 -gidnumber: 764 -homeDirectory: /home/uid764 - -dn: cn=user765,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user765 -sn: user765 -uid: uid765 -givenname: givenname765 -description: description765 -userPassword: password765 -mail: uid765 -uidnumber: 765 -gidnumber: 765 -homeDirectory: /home/uid765 - -dn: cn=user766,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user766 -sn: user766 -uid: uid766 -givenname: givenname766 -description: description766 -userPassword: password766 -mail: uid766 -uidnumber: 766 -gidnumber: 766 -homeDirectory: /home/uid766 - -dn: cn=user767,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user767 -sn: user767 -uid: uid767 -givenname: givenname767 -description: description767 -userPassword: password767 -mail: uid767 -uidnumber: 767 -gidnumber: 767 -homeDirectory: /home/uid767 - -dn: cn=user768,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson 
-objectClass: posixAccount -cn: user768 -sn: user768 -uid: uid768 -givenname: givenname768 -description: description768 -userPassword: password768 -mail: uid768 -uidnumber: 768 -gidnumber: 768 -homeDirectory: /home/uid768 - -dn: cn=user769,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user769 -sn: user769 -uid: uid769 -givenname: givenname769 -description: description769 -userPassword: password769 -mail: uid769 -uidnumber: 769 -gidnumber: 769 -homeDirectory: /home/uid769 - -dn: cn=user770,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user770 -sn: user770 -uid: uid770 -givenname: givenname770 -description: description770 -userPassword: password770 -mail: uid770 -uidnumber: 770 -gidnumber: 770 -homeDirectory: /home/uid770 - -dn: cn=user771,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user771 -sn: user771 -uid: uid771 -givenname: givenname771 -description: description771 -userPassword: password771 -mail: uid771 -uidnumber: 771 -gidnumber: 771 -homeDirectory: /home/uid771 - -dn: cn=user772,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user772 -sn: user772 -uid: uid772 -givenname: givenname772 -description: description772 -userPassword: password772 -mail: uid772 -uidnumber: 772 -gidnumber: 772 -homeDirectory: /home/uid772 - -dn: cn=user773,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user773 -sn: user773 -uid: uid773 -givenname: givenname773 -description: description773 -userPassword: password773 -mail: uid773 -uidnumber: 773 -gidnumber: 773 -homeDirectory: /home/uid773 - -dn: cn=user774,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user774 -sn: user774 -uid: uid774 -givenname: givenname774 -description: description774 -userPassword: password774 -mail: uid774 -uidnumber: 774 -gidnumber: 774 -homeDirectory: /home/uid774 - -dn: cn=user775,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user775 -sn: user775 -uid: uid775 -givenname: givenname775 -description: description775 -userPassword: password775 -mail: uid775 -uidnumber: 775 -gidnumber: 775 -homeDirectory: /home/uid775 - -dn: cn=user776,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user776 -sn: user776 -uid: uid776 -givenname: givenname776 -description: description776 -userPassword: password776 -mail: uid776 -uidnumber: 776 -gidnumber: 776 -homeDirectory: /home/uid776 - -dn: cn=user777,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user777 -sn: user777 -uid: uid777 -givenname: givenname777 -description: description777 -userPassword: password777 -mail: uid777 -uidnumber: 777 -gidnumber: 777 -homeDirectory: /home/uid777 - 
-dn: cn=user778,ou=People,dc=example,dc=com
-objectClass: top
-objectClass: person
-objectClass: organizationalPerson
-objectClass: inetOrgPerson
-objectClass: posixAccount
-cn: user778
-sn: user778
-uid: uid778
-givenname: givenname778
-description: description778
-userPassword: password778
-mail: uid778
-uidnumber: 778
-gidnumber: 778
-homeDirectory: /home/uid778
-
-dn: cn=user779,ou=People,dc=example,dc=com
-objectClass: top
-objectClass: person
-objectClass: organizationalPerson
-objectClass: inetOrgPerson
-objectClass: posixAccount
-cn: user779
-sn: user779
-uid: uid779
-givenname: givenname779
-description: description779
-userPassword: password779
-mail: uid779
-uidnumber: 779
-gidnumber: 779
-homeDirectory: /home/uid779
-
[The same deleted entry pattern repeats here for cn=user780 through cn=user969, ou=People,dc=example,dc=com: each entry is a top/person/organizationalPerson/inetOrgPerson/posixAccount object with cn and sn "userN", uid and mail "uidN", givenname "givennameN", description "descriptionN", userPassword "passwordN", uidnumber and gidnumber N, and homeDirectory /home/uidN.]
-dn: cn=user970,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user970 -sn: user970 -uid: uid970 -givenname: givenname970 -description: description970 -userPassword: password970 -mail: uid970 -uidnumber: 970 -gidnumber: 970 -homeDirectory: /home/uid970 - -dn: cn=user971,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user971 -sn: user971 -uid: uid971 -givenname: givenname971 -description: description971 -userPassword: password971 -mail: uid971 -uidnumber: 971 -gidnumber: 971 -homeDirectory: /home/uid971 - -dn: cn=user972,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user972 -sn: user972 -uid: uid972 -givenname: givenname972 -description: description972 -userPassword: password972 -mail: uid972 -uidnumber: 972 -gidnumber: 972 -homeDirectory: /home/uid972 - -dn: cn=user973,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user973 -sn: user973 -uid: uid973 -givenname: givenname973 -description: description973 -userPassword: password973 -mail: uid973 -uidnumber: 973 -gidnumber: 973 -homeDirectory: /home/uid973 - -dn: cn=user974,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user974 -sn: user974 -uid: uid974 -givenname: givenname974 -description: description974 -userPassword: password974 -mail: uid974 -uidnumber: 974 -gidnumber: 974 -homeDirectory: /home/uid974 - -dn: cn=user975,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user975 -sn: user975 -uid: uid975 -givenname: givenname975 -description: description975 -userPassword: password975 -mail: uid975 -uidnumber: 975 -gidnumber: 975 -homeDirectory: /home/uid975 - -dn: cn=user976,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user976 -sn: user976 -uid: uid976 -givenname: givenname976 -description: description976 -userPassword: password976 -mail: uid976 -uidnumber: 976 -gidnumber: 976 -homeDirectory: /home/uid976 - -dn: cn=user977,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user977 -sn: user977 -uid: uid977 -givenname: givenname977 -description: description977 -userPassword: password977 -mail: uid977 -uidnumber: 977 -gidnumber: 977 -homeDirectory: /home/uid977 - -dn: cn=user978,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user978 -sn: user978 -uid: uid978 -givenname: givenname978 -description: description978 -userPassword: password978 -mail: uid978 -uidnumber: 978 -gidnumber: 978 -homeDirectory: /home/uid978 - -dn: cn=user979,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user979 -sn: user979 -uid: uid979 -givenname: 
givenname979 -description: description979 -userPassword: password979 -mail: uid979 -uidnumber: 979 -gidnumber: 979 -homeDirectory: /home/uid979 - -dn: cn=user980,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user980 -sn: user980 -uid: uid980 -givenname: givenname980 -description: description980 -userPassword: password980 -mail: uid980 -uidnumber: 980 -gidnumber: 980 -homeDirectory: /home/uid980 - -dn: cn=user981,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user981 -sn: user981 -uid: uid981 -givenname: givenname981 -description: description981 -userPassword: password981 -mail: uid981 -uidnumber: 981 -gidnumber: 981 -homeDirectory: /home/uid981 - -dn: cn=user982,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user982 -sn: user982 -uid: uid982 -givenname: givenname982 -description: description982 -userPassword: password982 -mail: uid982 -uidnumber: 982 -gidnumber: 982 -homeDirectory: /home/uid982 - -dn: cn=user983,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user983 -sn: user983 -uid: uid983 -givenname: givenname983 -description: description983 -userPassword: password983 -mail: uid983 -uidnumber: 983 -gidnumber: 983 -homeDirectory: /home/uid983 - -dn: cn=user984,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user984 -sn: user984 -uid: uid984 -givenname: givenname984 -description: description984 -userPassword: password984 -mail: uid984 -uidnumber: 984 -gidnumber: 984 -homeDirectory: /home/uid984 - -dn: cn=user985,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user985 -sn: user985 -uid: uid985 -givenname: givenname985 -description: description985 -userPassword: password985 -mail: uid985 -uidnumber: 985 -gidnumber: 985 -homeDirectory: /home/uid985 - -dn: cn=user986,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user986 -sn: user986 -uid: uid986 -givenname: givenname986 -description: description986 -userPassword: password986 -mail: uid986 -uidnumber: 986 -gidnumber: 986 -homeDirectory: /home/uid986 - -dn: cn=user987,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user987 -sn: user987 -uid: uid987 -givenname: givenname987 -description: description987 -userPassword: password987 -mail: uid987 -uidnumber: 987 -gidnumber: 987 -homeDirectory: /home/uid987 - -dn: cn=user988,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user988 -sn: user988 -uid: uid988 -givenname: givenname988 -description: description988 -userPassword: password988 -mail: uid988 -uidnumber: 988 -gidnumber: 988 -homeDirectory: /home/uid988 - -dn: cn=user989,ou=People,dc=example,dc=com -objectClass: top -objectClass: 
person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user989 -sn: user989 -uid: uid989 -givenname: givenname989 -description: description989 -userPassword: password989 -mail: uid989 -uidnumber: 989 -gidnumber: 989 -homeDirectory: /home/uid989 - -dn: cn=user990,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user990 -sn: user990 -uid: uid990 -givenname: givenname990 -description: description990 -userPassword: password990 -mail: uid990 -uidnumber: 990 -gidnumber: 990 -homeDirectory: /home/uid990 - -dn: cn=user991,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user991 -sn: user991 -uid: uid991 -givenname: givenname991 -description: description991 -userPassword: password991 -mail: uid991 -uidnumber: 991 -gidnumber: 991 -homeDirectory: /home/uid991 - -dn: cn=user992,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user992 -sn: user992 -uid: uid992 -givenname: givenname992 -description: description992 -userPassword: password992 -mail: uid992 -uidnumber: 992 -gidnumber: 992 -homeDirectory: /home/uid992 - -dn: cn=user993,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user993 -sn: user993 -uid: uid993 -givenname: givenname993 -description: description993 -userPassword: password993 -mail: uid993 -uidnumber: 993 -gidnumber: 993 -homeDirectory: /home/uid993 - -dn: cn=user994,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user994 -sn: user994 -uid: uid994 -givenname: givenname994 -description: description994 -userPassword: password994 -mail: uid994 -uidnumber: 994 -gidnumber: 994 -homeDirectory: /home/uid994 - -dn: cn=user995,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user995 -sn: user995 -uid: uid995 -givenname: givenname995 -description: description995 -userPassword: password995 -mail: uid995 -uidnumber: 995 -gidnumber: 995 -homeDirectory: /home/uid995 - -dn: cn=user996,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user996 -sn: user996 -uid: uid996 -givenname: givenname996 -description: description996 -userPassword: password996 -mail: uid996 -uidnumber: 996 -gidnumber: 996 -homeDirectory: /home/uid996 - -dn: cn=user997,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user997 -sn: user997 -uid: uid997 -givenname: givenname997 -description: description997 -userPassword: password997 -mail: uid997 -uidnumber: 997 -gidnumber: 997 -homeDirectory: /home/uid997 - -dn: cn=user998,ou=People,dc=example,dc=com -objectClass: top -objectClass: person -objectClass: organizationalPerson -objectClass: inetOrgPerson -objectClass: posixAccount -cn: user998 -sn: user998 -uid: uid998 -givenname: givenname998 -description: description998 -userPassword: password998 -mail: 
uid998
-uidnumber: 998
-gidnumber: 998
-homeDirectory: /home/uid998
-
-dn: cn=user999,ou=People,dc=example,dc=com
-objectClass: top
-objectClass: person
-objectClass: organizationalPerson
-objectClass: inetOrgPerson
-objectClass: posixAccount
-cn: user999
-sn: user999
-uid: uid999
-givenname: givenname999
-description: description999
-userPassword: password999
-mail: uid999
-uidnumber: 999
-gidnumber: 999
-homeDirectory: /home/uid999
-
diff --git a/dirsrvtests/suites/acct_usability_plugin/acct_usability_test.py b/dirsrvtests/suites/acct_usability_plugin/acct_usability_test.py
deleted file mode 100644
index 36021e2..0000000
--- a/dirsrvtests/suites/acct_usability_plugin/acct_usability_test.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_acct_usability_init(topology):
-    '''
-    Write any test suite initialization here(if needed)
-    '''
-
-    return
-
-
-def test_acct_usability_(topology):
-    '''
-    Write a single test here...
-    '''
-
-    return
-
-
-def test_acct_usability_final(topology):
-    topology.standalone.delete()
-    log.info('acct_usability test suite PASSED')
-
-
-def run_isolated():
-    global installation1_prefix
-    installation1_prefix = None
-
-    topo = topology(True)
-    test_acct_usability_init(topo)
-    test_acct_usability_(topo)
-    test_acct_usability_final(topo)
-
-
-if __name__ == '__main__':
-    run_isolated()
-
diff --git a/dirsrvtests/suites/acctpolicy_plugin/acctpolicy_test.py b/dirsrvtests/suites/acctpolicy_plugin/acctpolicy_test.py
deleted file mode 100644
index b7db352..0000000
--- a/dirsrvtests/suites/acctpolicy_plugin/acctpolicy_test.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_acctpolicy_init(topology):
-    '''
-    Write any test suite initialization here(if needed)
-    '''
-
-    return
-
-
-def test_acctpolicy_(topology):
-    '''
-    Write a single test here...
-    '''
-
-    return
-
-
-def test_acctpolicy_final(topology):
-    topology.standalone.delete()
-    log.info('acctpolicy test suite PASSED')
-
-
-def run_isolated():
-    global installation1_prefix
-    installation1_prefix = None
-
-    topo = topology(True)
-    test_acctpolicy_init(topo)
-    test_acctpolicy_(topo)
-    test_acctpolicy_final(topo)
-
-
-if __name__ == '__main__':
-    run_isolated()
-
diff --git a/dirsrvtests/suites/acl/acl_test.py b/dirsrvtests/suites/acl/acl_test.py
deleted file mode 100644
index 422a1ec..0000000
--- a/dirsrvtests/suites/acl/acl_test.py
+++ /dev/null
@@ -1,1059 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-from ldap.controls.simple import GetEffectiveRightsControl
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-#
-# important part. 
We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX - -STAGING_CN = "staged user" -PRODUCTION_CN = "accounts" -EXCEPT_CN = "excepts" - -STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) -PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) -PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) - -STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) -PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) -BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) -BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) - -BIND_CN = "bind_entry" -BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) -BIND_PW = "password" - -NEW_ACCOUNT = "new_account" -MAX_ACCOUNTS = 20 - -CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" - -SRC_ENTRY_CN = "tuser" -EXT_RDN = "01" -DST_ENTRY_CN = SRC_ENTRY_CN + EXT_RDN - -SRC_ENTRY_DN = "cn=%s,%s" % (SRC_ENTRY_CN, SUFFIX) -DST_ENTRY_DN = "cn=%s,%s" % (DST_ENTRY_CN, SUFFIX) - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - """This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. - """ - - global installation1_prefix - global installation2_prefix - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, - role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, - role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, - host=master2.host, - port=master2.port, - properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, - host=master1.host, - port=master1.port, - properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - def fin(): - master1.delete() - master2.delete() - request.addfinalizer(fin) - - # clear the tmp directory - master1.clearTmpDir(__file__) - - # Here we have two instances master and consumer - # with replication working. - return TopologyMaster1Master2(master1, master2) - - -def add_attr(topology, attr_name): - """Adds attribute to the schema""" - - ATTR_VALUE = """(NAME '%s' \ - DESC 'Attribute filteri-Multi-Valued' \ - SYNTAX 1.3.6.1.4.1.1466.115.121.1.27)""" % attr_name - mod = [(ldap.MOD_ADD, 'attributeTypes', ATTR_VALUE)] - - try: - topology.standalone.modify_s(DN_SCHEMA, mod) - except ldap.LDAPError as e: - log.fatal('Failed to add attr (%s): error (%s)' % (attr_name, - e.message['desc'])) - assert False - - -@pytest.fixture(params=["lang-ja", "binary", "phonetic"]) -def aci_with_attr_subtype(request, topology): - """Adds and deletes an ACI in the DEFAULT_SUFFIX""" - - TARGET_ATTR = 'protectedOperation' - USER_ATTR = 'allowedToPerform' - SUBTYPE = request.param - - log.info("========Executing test with '%s' subtype========" % SUBTYPE) - log.info(" Add a target attribute") - add_attr(topology, TARGET_ATTR) - - log.info(" Add a user attribute") - add_attr(topology, USER_ATTR) - - ACI_TARGET = '(targetattr=%s;%s)' % (TARGET_ATTR, SUBTYPE) - ACI_ALLOW = '(version 3.0; acl "test aci for subtypes"; allow (read) ' - ACI_SUBJECT = 'userattr = "%s;%s#GROUPDN";)' % (USER_ATTR, SUBTYPE) - ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT - - log.info(" Add an ACI with attribute subtype") - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - try: - topology.standalone.modify_s(DEFAULT_SUFFIX, mod) - except ldap.LDAPError as e: - log.fatal('Failed to add ACI: error (%s)' % (e.message['desc'])) - assert False - - def fin(): - log.info(" Finally, delete an ACI with the '%s' subtype" % - SUBTYPE) - mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] - try: - topology.standalone.modify_s(DEFAULT_SUFFIX, mod) - except ldap.LDAPError as e: - log.fatal('Failed to delete ACI: error (%s)' % (e.message['desc'])) - assert False - request.addfinalizer(fin) - - return ACI_BODY - - -def test_aci_attr_subtype_targetattr(topology, aci_with_attr_subtype): - """Checks, that ACIs allow attribute subtypes in the targetattr keyword - - Test description: - 1. Define two attributes in the schema - - first will be a targetattr - - second will be a userattr - 2. 
Add an ACI with an attribute subtype - - or language subtype - - or binary subtype - - or pronunciation subtype - """ - - log.info(" Search for the added attribute") - try: - entries = topology.master1.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_BASE, - '(objectclass=*)', ['aci']) - entry = str(entries[0]) - assert aci_with_attr_subtype in entry - log.info(" The added attribute was found") - - except ldap.LDAPError as e: - log.fatal('Search failed, error: ' + e.message['desc']) - assert False - - -def _bind_manager(topology): - topology.master1.log.info("Bind as %s " % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - - -def _bind_normal(topology): - # bind as bind_entry - topology.master1.log.info("Bind as %s" % BIND_DN) - topology.master1.simple_bind_s(BIND_DN, BIND_PW) - - -def _moddn_aci_deny_tree(topology, mod_type=None, - target_from=STAGING_DN, target_to=PROD_EXCEPT_DN): - """It denies the access moddn_to in cn=except,cn=accounts,SUFFIX""" - - assert mod_type is not None - - ACI_TARGET_FROM = "" - ACI_TARGET_TO = "" - if target_from: - ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) - if target_to: - ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) - - ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT - mod = [(mod_type, 'aci', ACI_BODY)] - #topology.master1.modify_s(SUFFIX, mod) - topology.master1.log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN) - topology.master1.modify_s(PROD_EXCEPT_DN, mod) - - -def _write_aci_staging(topology, mod_type=None): - assert mod_type is not None - - ACI_TARGET = "(targetattr= \"cn\")(target=\"ldap:///cn=*,%s\")" % STAGING_DN - ACI_ALLOW = "(version 3.0; acl \"write staging entries\"; allow (write)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT - mod = [(mod_type, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - - -def _write_aci_production(topology, mod_type=None): - assert mod_type is not None - - ACI_TARGET = "(targetattr= \"cn\")(target=\"ldap:///cn=*,%s\")" % PRODUCTION_DN - ACI_ALLOW = "(version 3.0; acl \"write production entries\"; allow (write)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT - mod = [(mod_type, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - - -def _moddn_aci_staging_to_production(topology, mod_type=None, - target_from=STAGING_DN, target_to=PRODUCTION_DN): - assert mod_type is not None - - - ACI_TARGET_FROM = "" - ACI_TARGET_TO = "" - if target_from: - ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) - if target_to: - ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) - - ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT - mod = [(mod_type, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - - _write_aci_staging(topology, mod_type=mod_type) - - -def _moddn_aci_from_production_to_staging(topology, mod_type=None): - assert mod_type is not None - - ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % ( - PRODUCTION_DN, STAGING_DN) - ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = 
ACI_TARGET + ACI_ALLOW + ACI_SUBJECT - mod = [(mod_type, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - - _write_aci_production(topology, mod_type=mod_type) - - -@pytest.fixture(scope="module") -def moddn_setup(topology): - """Creates - - a staging DIT - - a production DIT - - add accounts in staging DIT - - enable ACL logging (commented for performance reason) - """ - - topology.master1.log.info("\n\n######## INITIALIZATION ########\n") - - # entry used to bind with - topology.master1.log.info("Add %s" % BIND_DN) - topology.master1.add_s(Entry((BIND_DN, { - 'objectclass': "top person".split(), - 'sn': BIND_CN, - 'cn': BIND_CN, - 'userpassword': BIND_PW}))) - - # DIT for staging - topology.master1.log.info("Add %s" % STAGING_DN) - topology.master1.add_s(Entry((STAGING_DN, { - 'objectclass': "top organizationalRole".split(), - 'cn': STAGING_CN, - 'description': "staging DIT"}))) - - # DIT for production - topology.master1.log.info("Add %s" % PRODUCTION_DN) - topology.master1.add_s(Entry((PRODUCTION_DN, { - 'objectclass': "top organizationalRole".split(), - 'cn': PRODUCTION_CN, - 'description': "production DIT"}))) - - # DIT for production/except - topology.master1.log.info("Add %s" % PROD_EXCEPT_DN) - topology.master1.add_s(Entry((PROD_EXCEPT_DN, { - 'objectclass': "top organizationalRole".split(), - 'cn': EXCEPT_CN, - 'description': "production except DIT"}))) - - # enable acl error logging - #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] - #topology.master1.modify_s(DN_CONFIG, mod) - #topology.master2.modify_s(DN_CONFIG, mod) - - # add dummy entries in the staging DIT - for cpt in range(MAX_ACCOUNTS): - name = "%s%d" % (NEW_ACCOUNT, cpt) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - -def test_mode_default_add_deny(topology, moddn_setup): - """This test case checks - that the ADD operation fails (no ADD aci on production) - """ - - topology.master1.log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n") - - _bind_normal(topology) - - # - # First try to add an entry in production => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to add %s" % PRODUCTION_DN) - name = "%s%d" % (NEW_ACCOUNT, 0) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, PRODUCTION_DN), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - assert 0 # this is an error, we should not be allowed to add an entry in production - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - -def test_mode_default_delete_deny(topology, moddn_setup): - """This test case checks - that the DEL operation fails (no 'delete' aci on production) - """ - - topology.master1.log.info("\n\n######## DELETE (should fail) ########\n") - - _bind_normal(topology) - # - # Second try to delete an entry in staging => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to delete %s" % STAGING_DN) - name = "%s%d" % (NEW_ACCOUNT, 0) - topology.master1.delete_s("cn=%s,%s" % (name, STAGING_DN)) - assert 0 # this is an error, we should not be allowed to add an entry in production - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - -@pytest.mark.parametrize("index,tfrom,tto,failure", - [(0, STAGING_DN, PRODUCTION_DN, False), - (1, STAGING_DN, PRODUCTION_DN, False), - (2, STAGING_DN, 
BAD_PRODUCTION_PATTERN, True), - (3, STAGING_PATTERN, PRODUCTION_DN, False), - (4, BAD_STAGING_PATTERN, PRODUCTION_DN, True), - (5, STAGING_PATTERN, PRODUCTION_PATTERN, False), - (6, None, PRODUCTION_PATTERN, False), - (7, STAGING_PATTERN, None, False), - (8, None, None, False)]) -def test_moddn_staging_prod(topology, moddn_setup, - index, tfrom, tto, failure): - """This test case MOVE entry NEW_ACCOUNT0 from staging to prod - target_to/target_from: equality filter - """ - - topology.master1.log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index) - _bind_normal(topology) - - old_rdn = "cn=%s%s" % (NEW_ACCOUNT, index) - old_dn = "%s,%s" % (old_rdn, STAGING_DN) - new_rdn = old_rdn - new_superior = PRODUCTION_DN - - # - # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - - # successfull MOD with the ACI - topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=tfrom, target_to=tto) - _bind_normal(topology) - - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - if failure: - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # successfull MOD with the both ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=tfrom, target_to=tto) - _bind_normal(topology) - - -def test_moddn_staging_prod_9(topology, moddn_setup): - """This test case disable the 'moddn' right so a MODDN requires a 'add' right - to be successfull. - It fails to MOVE entry NEW_ACCOUNT9 from staging to prod. - Add a 'add' right to prod. - Then it succeeds to MOVE NEW_ACCOUNT9 from staging to prod. - - Then enable the 'moddn' right so a MODDN requires a 'moddn' right - It fails to MOVE entry NEW_ACCOUNT10 from staging to prod. - Add a 'moddn' right to prod. - Then it succeeds to MOVE NEW_ACCOUNT10 from staging to prod. 
- """ - - topology.master1.log.info("\n\n######## MOVE staging -> Prod (9) ########\n") - - _bind_normal(topology) - old_rdn = "cn=%s9" % NEW_ACCOUNT - old_dn = "%s,%s" % (old_rdn, STAGING_DN) - new_rdn = old_rdn - new_superior = PRODUCTION_DN - - # - # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - ############# - # Now do tests with no support of moddn aci - ############# - topology.master1.log.info("Disable the moddn right") - _bind_manager(topology) - mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')] - topology.master1.modify_s(DN_CONFIG, mod) - - # Add the moddn aci that will not be evaluated because of the config flag - topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - # It will fail because it will test the ADD right - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # remove the moddn aci - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - # - # add the 'add' right to the production DN - # Then do a successfull moddn - # - ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_ALLOW + ACI_SUBJECT - - _bind_manager(topology) - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.master1.modify_s(PRODUCTION_DN, mod) - _write_aci_staging(topology, mod_type=ldap.MOD_ADD) - _bind_normal(topology) - - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - - _bind_manager(topology) - mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] - topology.master1.modify_s(PRODUCTION_DN, mod) - _write_aci_staging(topology, mod_type=ldap.MOD_DELETE) - _bind_normal(topology) - - ############# - # Now do tests with support of moddn aci - ############# - topology.master1.log.info("Enable the moddn right") - _bind_manager(topology) - mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'on')] - topology.master1.modify_s(DN_CONFIG, mod) - - topology.master1.log.info("\n\n######## MOVE staging -> Prod (10) ########\n") - - _bind_normal(topology) - old_rdn = "cn=%s10" % NEW_ACCOUNT - old_dn = "%s,%s" % (old_rdn, STAGING_DN) - new_rdn = old_rdn - new_superior = PRODUCTION_DN - - # - # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to MODDN %s 
-> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # - # add the 'add' right to the production DN - # Then do a failing moddn - # - ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_ALLOW + ACI_SUBJECT - - _bind_manager(topology) - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.master1.modify_s(PRODUCTION_DN, mod) - _write_aci_staging(topology, mod_type=ldap.MOD_ADD) - _bind_normal(topology) - - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - _bind_manager(topology) - mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] - topology.master1.modify_s(PRODUCTION_DN, mod) - _write_aci_staging(topology, mod_type=ldap.MOD_DELETE) - _bind_normal(topology) - - # Add the moddn aci that will be evaluated because of the config flag - topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - - # remove the moddn aci - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - -def test_moddn_prod_staging(topology, moddn_setup): - """This test checks that we can move ACCOUNT11 from staging to prod - but not move back ACCOUNT11 from prod to staging - """ - - topology.master1.log.info("\n\n######## MOVE staging -> Prod (11) ########\n") - - _bind_normal(topology) - - old_rdn = "cn=%s11" % NEW_ACCOUNT - old_dn = "%s,%s" % (old_rdn, STAGING_DN) - new_rdn = old_rdn - new_superior = PRODUCTION_DN - - # - # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # successfull MOD with the ACI - topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - topology.master1.log.info("Try to MODDN %s -> %s,%s" % 
(old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - - # Now check we can not move back the entry to staging - old_rdn = "cn=%s11" % NEW_ACCOUNT - old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN) - new_rdn = old_rdn - new_superior = STAGING_DN - - # add the write right because we want to check the moddn - _bind_manager(topology) - _write_aci_production(topology, mod_type=ldap.MOD_ADD) - _bind_normal(topology) - - try: - topology.master1.log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - _bind_manager(topology) - _write_aci_production(topology, mod_type=ldap.MOD_DELETE) - _bind_normal(topology) - - # successfull MOD with the both ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - -def test_check_repl_M2_to_M1(topology, moddn_setup): - """Checks that replication is still working M2->M1, using ACCOUNT12""" - - topology.master1.log.info("Bind as %s (M2)" % DN_DM) - topology.master2.simple_bind_s(DN_DM, PASSWORD) - - rdn = "cn=%s12" % NEW_ACCOUNT - dn = "%s,%s" % (rdn, STAGING_DN) - - # First wait for the ACCOUNT19 entry being replicated on M2 - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - assert loop <= 10 - - attribute = 'description' - tested_value = 'Hello world' - mod = [(ldap.MOD_ADD, attribute, tested_value)] - topology.master1.log.info("Update (M2) %s (%s)" % (dn, attribute)) - topology.master2.modify_s(dn, mod) - - loop = 0 - while loop <= 10: - ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent is not None - if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value): - break - - time.sleep(1) - loop += 1 - assert loop < 10 - topology.master1.log.info("Update %s (%s) replicated on M1" % (dn, attribute)) - - -def test_moddn_staging_prod_except(topology, moddn_setup): - """This test case MOVE entry NEW_ACCOUNT13 from staging to prod - but fails to move entry NEW_ACCOUNT14 from staging to prod_except - """ - - topology.master1.log.info("\n\n######## MOVE staging -> Prod (13) ########\n") - _bind_normal(topology) - - old_rdn = "cn=%s13" % NEW_ACCOUNT - old_dn = "%s,%s" % (old_rdn, STAGING_DN) - new_rdn = old_rdn - new_superior = PRODUCTION_DN - - # - # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS - # - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # successfull MOD with the ACI - topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") - _bind_manager(topology) - 
_moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_ADD) - _bind_normal(topology) - - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - - # - # Now try to move an entry under except - # - topology.master1.log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n") - old_rdn = "cn=%s14" % NEW_ACCOUNT - old_dn = "%s,%s" % (old_rdn, STAGING_DN) - new_rdn = old_rdn - new_superior = PROD_EXCEPT_DN - try: - topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) - topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - assert 0 - except AssertionError: - topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # successfull MOD with the both ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_DELETE) - _bind_normal(topology) - - -def test_mode_default_ger_no_moddn(topology, moddn_setup): - topology.master1.log.info("\n\n######## mode moddn_aci : GER no moddn ########\n") - request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) - msg_id = topology.master1.search_ext(PRODUCTION_DN, - ldap.SCOPE_SUBTREE, - "objectclass=*", - serverctrls=[request_ctrl]) - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - #ger={} - value = '' - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - value = attrs['entryLevelRights'][0] - - topology.master1.log.info("######## entryLevelRights: %r" % value) - assert 'n' not in value - - -def test_mode_default_ger_with_moddn(topology, moddn_setup): - """This test case adds the moddn aci and check ger contains 'n'""" - - topology.master1.log.info("\n\n######## mode moddn_aci: GER with moddn ########\n") - - # successfull MOD with the ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) - msg_id = topology.master1.search_ext(PRODUCTION_DN, - ldap.SCOPE_SUBTREE, - "objectclass=*", - serverctrls=[request_ctrl]) - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - #ger={} - value = '' - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - value = attrs['entryLevelRights'][0] - - topology.master1.log.info("######## entryLevelRights: %r" % value) - assert 'n' in value - - # successfull MOD with the both ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - -def test_mode_switch_default_to_legacy(topology, moddn_setup): - """This test switch the server from default mode to legacy""" - - topology.master1.log.info("\n\n######## Disable the moddn aci mod ########\n") - _bind_manager(topology) - mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')] - topology.master1.modify_s(DN_CONFIG, mod) - - -def 
test_mode_legacy_ger_no_moddn1(topology, moddn_setup): - topology.master1.log.info("\n\n######## mode legacy 1: GER no moddn ########\n") - request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) - msg_id = topology.master1.search_ext(PRODUCTION_DN, - ldap.SCOPE_SUBTREE, - "objectclass=*", - serverctrls=[request_ctrl]) - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - #ger={} - value = '' - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - value = attrs['entryLevelRights'][0] - - topology.master1.log.info("######## entryLevelRights: %r" % value) - assert 'n' not in value - - -def test_mode_legacy_ger_no_moddn2(topology, moddn_setup): - topology.master1.log.info("\n\n######## mode legacy 2: GER no moddn ########\n") - # successfull MOD with the ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) - msg_id = topology.master1.search_ext(PRODUCTION_DN, - ldap.SCOPE_SUBTREE, - "objectclass=*", - serverctrls=[request_ctrl]) - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - #ger={} - value = '' - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - value = attrs['entryLevelRights'][0] - - topology.master1.log.info("######## entryLevelRights: %r" % value) - assert 'n' not in value - - # successfull MOD with the both ACI - _bind_manager(topology) - _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, - target_from=STAGING_DN, target_to=PRODUCTION_DN) - _bind_normal(topology) - - -def test_mode_legacy_ger_with_moddn(topology, moddn_setup): - topology.master1.log.info("\n\n######## mode legacy : GER with moddn ########\n") - - # being allowed to read/write the RDN attribute use to allow the RDN - ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"cn\")" % (PRODUCTION_DN) - ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)" - ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN - ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT - - # successfull MOD with the ACI - _bind_manager(topology) - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - _bind_normal(topology) - - request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) - msg_id = topology.master1.search_ext(PRODUCTION_DN, - ldap.SCOPE_SUBTREE, - "objectclass=*", - serverctrls=[request_ctrl]) - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - #ger={} - value = '' - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - value = attrs['entryLevelRights'][0] - - topology.master1.log.info("######## entryLevelRights: %r" % value) - assert 'n' in value - - # successfull MOD with the both ACI - _bind_manager(topology) - mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - #_bind_normal(topology) - - -@pytest.fixture(scope="module") -def rdn_write_setup(topology): - topology.master1.log.info("\n\n######## Add entry tuser ########\n") - topology.master1.add_s(Entry((SRC_ENTRY_DN, { - 'objectclass': "top person".split(), - 'sn': SRC_ENTRY_CN, - 'cn': SRC_ENTRY_CN}))) - - -def test_rdn_write_get_ger(topology, rdn_write_setup): - ANONYMOUS_DN = "" - topology.master1.log.info("\n\n######## GER rights for anonymous ########\n") - request_ctrl = 
GetEffectiveRightsControl(criticality=True, - authzId="dn:" + ANONYMOUS_DN) - msg_id = topology.master1.search_ext(SUFFIX, - ldap.SCOPE_SUBTREE, - "objectclass=*", - serverctrls=[request_ctrl]) - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - value = '' - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - for value in attrs['entryLevelRights']: - topology.master1.log.info("######## entryLevelRights: %r" % value) - assert 'n' not in value - - -def test_rdn_write_modrdn_anonymous(topology, rdn_write_setup): - ANONYMOUS_DN = "" - topology.master1.close() - topology.master1.binddn = ANONYMOUS_DN - topology.master1.open() - msg_id = topology.master1.search_ext("", ldap.SCOPE_BASE, "objectclass=*") - rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - for attr in attrs: - topology.master1.log.info("######## %r: %r" % (attr, attrs[attr])) - - try: - topology.master1.rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True) - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - isinstance(e, ldap.INSUFFICIENT_ACCESS) - - try: - topology.master1.getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*") - assert False - except Exception as e: - topology.master1.log.info("The entry was not renamed (expected)") - isinstance(e, ldap.NO_SUCH_OBJECT) - - _bind_manager(topology) - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/suites/attr_encryption/attr_encrypt_test.py b/dirsrvtests/suites/attr_encryption/attr_encrypt_test.py deleted file mode 100644 index 7d14a76..0000000 --- a/dirsrvtests/suites/attr_encryption/attr_encrypt_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_attr_encrypt_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_attr_encrypt_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_attr_encrypt_final(topology): - topology.standalone.delete() - log.info('attr_encrypt test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_attr_encrypt_init(topo) - test_attr_encrypt_(topo) - test_attr_encrypt_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py b/dirsrvtests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py deleted file mode 100644 index 06e7425..0000000 --- a/dirsrvtests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py +++ /dev/null @@ -1,248 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX -USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_attr_uniqueness_init(topology): - ''' - Enable dynamic plugins - makes things easier - ''' - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - ldap.fatal('Failed to enable dynamic plugin!' 
+ e.message['desc']) - assert False - - topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) - - -def test_attr_uniqueness(topology): - log.info('Running test_attr_uniqueness...') - - # - # Configure plugin - # - try: - topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')]) - - except ldap.LDAPError as e: - log.fatal('test_attr_uniqueness: Failed to configure plugin for "uid": error ' + e.message['desc']) - assert False - - # Add an entry - try: - topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'mail': 'user1@example.com', - 'mailAlternateAddress': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.fatal('test_attr_uniqueness: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) - assert False - - # Add an entry with a duplicate "uid" - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'uid': 'user1', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.fatal('test_attr_uniqueness: Adding of 2nd entry(uid) incorrectly succeeded') - assert False - - # - # Change config to use "mail" instead of "uid" - # - try: - topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')]) - - except ldap.LDAPError as e: - log.fatal('test_attr_uniqueness: Failed to configure plugin for "mail": error ' + e.message['desc']) - assert False - - # - # Test plugin - Add an entry, that has a duplicate "mail" value - # - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.fatal('test_attr_uniqueness: Adding of 2nd entry(mail) incorrectly succeeded') - assert False - - # - # Reconfigure plugin for mail and mailAlternateAddress - # - try: - topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'), - (ldap.MOD_ADD, 'uniqueness-attribute-name', - 'mailAlternateAddress')]) - except ldap.LDAPError as e: - log.error('test_attr_uniqueness: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + - e.message['desc']) - assert False - - # - # Test plugin - Add an entry, that has a duplicate "mail" value - # - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attr_uniqueness: Adding of 3rd entry(mail) incorrectly succeeded') - assert False - - # - # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value - # - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mailAlternateAddress': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attr_uniqueness: Adding of 4th entry(mailAlternateAddress) incorrectly succeeded') - assert 
False - - # - # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress - # - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attr_uniqueness: Adding of 5th entry(mailAlternateAddress) incorrectly succeeded') - assert False - - # - # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail - # - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mailAlternateAddress': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attr_uniqueness: Adding of 6th entry(mail) incorrectly succeeded') - assert False - - # - # Cleanup - # - try: - topology.standalone.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_attr_uniqueness: Failed to delete test entry: ' + e.message['desc']) - assert False - - log.info('test_attr_uniqueness: PASS\n') - - -def test_attr_uniqueness_final(topology): - topology.standalone.delete() - log.info('attr_uniqueness test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_attr_uniqueness_init(topo) - test_attr_uniqueness(topo) - test_attr_uniqueness_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/automember_plugin/automember_test.py b/dirsrvtests/suites/automember_plugin/automember_test.py deleted file mode 100644 index 3e5f020..0000000 --- a/dirsrvtests/suites/automember_plugin/automember_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
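# Every negative case in test_attr_uniqueness above has the same shape: point
# uniqueness-attribute-name at one or more attributes, then add an entry that
# reuses a value already present and expect CONSTRAINT_VIOLATION. A condensed
# sketch of that shape; PLUGIN_ATTR_UNIQUENESS comes from lib389._constants
# as in the suite above, and standalone/entry are placeholder names.
import ldap
from lib389._constants import PLUGIN_ATTR_UNIQUENESS

def expect_uniqueness_violation(standalone, attrs, entry):
    """Reconfigure the Attribute Uniqueness plugin, then add a duplicate."""
    plugin_dn = 'cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config'
    mods = [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', attrs[0])]
    for extra in attrs[1:]:
        mods.append((ldap.MOD_ADD, 'uniqueness-attribute-name', extra))
    standalone.modify_s(plugin_dn, mods)
    try:
        standalone.add_s(entry)        # entry reuses an existing value
    except ldap.CONSTRAINT_VIOLATION:
        return True                    # duplicate rejected, plugin fired
    return False                       # add went through, plugin missed it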
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_automember_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_automember_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_automember_final(topology): - topology.standalone.delete() - log.info('automember test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_automember_init(topo) - test_automember_(topo) - test_automember_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/basic/basic_test.py b/dirsrvtests/suites/basic/basic_test.py deleted file mode 100644 index d2f81ff..0000000 --- a/dirsrvtests/suites/basic/basic_test.py +++ /dev/null @@ -1,775 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import ldap.sasl -import logging -import pytest -import shutil -from subprocess import check_output -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -# Globals -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX -USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX -USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX - -ROOTDSE_DEF_ATTR_LIST = ('namingContexts', - 'supportedLDAPVersion', - 'supportedControl', - 'supportedExtension', - 'supportedSASLMechanisms', - 'vendorName', - 'vendorVersion') - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - """This fixture is used to standalone topology for the 'module'.""" - - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
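# The skeleton suites above only delete their instance inside a *_final test,
# so any earlier failure leaves the instance behind; basic_test.py, which this
# hunk is entering, registers a pytest finalizer instead, and that teardown
# runs at module end even when a test fails or is deselected. Condensed, the
# finalizer-based lifecycle looks like this; args_instance and the SER_*,
# HOST_*, PORT_* names come from the lib389 star-imports used by these suites.
import pytest
from lib389 import DirSrv
from lib389._constants import *
from lib389.properties import *

@pytest.fixture(scope="module")
def topology(request):
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()              # always start from a fresh instance
    standalone.create()
    standalone.open()

    def fin():
        standalone.delete()              # teardown runs even if a test failed
    request.addfinalizer(fin)

    return standalone                    # the suites wrap this in TopologyStandalone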
- standalone.open() - - # Delete each instance in the end - def fin(): - standalone.delete() - request.addfinalizer(fin) - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -@pytest.fixture(scope="module") -def import_example_ldif(topology): - """Import the Example LDIF for the tests in this suite""" - - log.info('Initializing the "basic" test suite') - - import_ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix) - try: - topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, - input_file=import_ldif, - args={TASK_WAIT: True}) - except ValueError: - log.error('Online import failed') - assert False - - -@pytest.fixture(params=ROOTDSE_DEF_ATTR_LIST) -def rootdse_attr(topology, request): - """Adds an attr from the list - as the default attr to the rootDSE - """ - - RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr" - rootdse_attr_name = request.param - - log.info(" Add the %s: %s to rootdse" % (RETURN_DEFAULT_OPATTR, - rootdse_attr_name)) - mod = [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] - try: - topology.standalone.modify_s("", mod) - except ldap.LDAPError as e: - log.fatal('Failed to add attr: error (%s)' % (e.message['desc'])) - assert False - - def fin(): - log.info(" Delete the %s: %s from rootdse" % (RETURN_DEFAULT_OPATTR, - rootdse_attr_name)) - mod = [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] - try: - topology.standalone.modify_s("", mod) - except ldap.LDAPError as e: - log.fatal('Failed to delete attr: error (%s)' % (e.message['desc'])) - assert False - request.addfinalizer(fin) - - return rootdse_attr_name - - -def test_basic_ops(topology, import_example_ldif): - """Test doing adds, mods, modrdns, and deletes""" - - log.info('Running test_basic_ops...') - - USER1_NEWDN = 'cn=user1' - USER2_NEWDN = 'cn=user2' - USER3_NEWDN = 'cn=user3' - NEW_SUPERIOR = 'ou=people,' + DEFAULT_SUFFIX - USER1_RDN_DN = 'cn=user1,' + DEFAULT_SUFFIX - USER2_RDN_DN = 'cn=user2,' + DEFAULT_SUFFIX - USER3_RDN_DN = 'cn=user3,' + NEW_SUPERIOR # New superior test - - # - # Adds - # - try: - topology.standalone.add_s(Entry((USER1_DN, - {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user1', - 'uid': 'user1', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.error('Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER2_DN, - {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user2', - 'uid': 'user2', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.error('Failed to add test user' + USER2_DN + ': error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER3_DN, - {'objectclass': "top extensibleObject".split(), - 'sn': '3', - 'cn': 'user3', - 'uid': 'user3', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.error('Failed to add test user' + USER3_DN + ': error ' + e.message['desc']) - assert False - - # - # Mods - # - try: - topology.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description', - 'New description')]) - except ldap.LDAPError as e: - log.error('Failed to add description: error ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description', - 'Modified description')]) - except ldap.LDAPError as e: - log.error('Failed to modify description: error ' + e.message['desc']) 
- assert False - - try: - topology.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description', - None)]) - except ldap.LDAPError as e: - log.error('Failed to delete description: error ' + e.message['desc']) - assert False - - # - # Modrdns - # - try: - topology.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1) - except ldap.LDAPError as e: - log.error('Failed to modrdn user1: error ' + e.message['desc']) - assert False - - try: - topology.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0) - except ldap.LDAPError as e: - log.error('Failed to modrdn user2: error ' + e.message['desc']) - assert False - - # Modrdn - New superior - try: - topology.standalone.rename_s(USER3_DN, USER3_NEWDN, - newsuperior=NEW_SUPERIOR, delold=1) - except ldap.LDAPError as e: - log.error('Failed to modrdn(new superior) user3: error ' + e.message['desc']) - assert False - - # - # Deletes - # - try: - topology.standalone.delete_s(USER1_RDN_DN) - except ldap.LDAPError as e: - log.error('Failed to delete test entry1: ' + e.message['desc']) - assert False - - try: - topology.standalone.delete_s(USER2_RDN_DN) - except ldap.LDAPError as e: - log.error('Failed to delete test entry2: ' + e.message['desc']) - assert False - - try: - topology.standalone.delete_s(USER3_RDN_DN) - except ldap.LDAPError as e: - log.error('Failed to delete test entry3: ' + e.message['desc']) - assert False - - log.info('test_basic_ops: PASSED') - - -def test_basic_import_export(topology, import_example_ldif): - """Test online and offline LDIF imports & exports""" - - log.info('Running test_basic_import_export...') - - tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) - - # - # Test online/offline LDIF imports - # - - # Generate a test ldif (50k entries) - import_ldif = tmp_dir + '/basic_import.ldif' - try: - topology.standalone.buildLDIF(50000, import_ldif) - except OSError as e: - log.fatal('test_basic_import_export: failed to create test ldif,\ - error: %s - %s' % (e.errno, e.strerror)) - assert False - - # Online - try: - topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, - input_file=import_ldif, - args={TASK_WAIT: True}) - except ValueError: - log.fatal('test_basic_import_export: Online import failed') - assert False - - # Offline - if not topology.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif): - log.fatal('test_basic_import_export: Offline import failed') - assert False - - # - # Test online and offline LDIF export - # - - # Online export - export_ldif = tmp_dir + 'export.ldif' - exportTask = Tasks(topology.standalone) - try: - args = {TASK_WAIT: True} - exportTask.exportLDIF(DEFAULT_SUFFIX, None, export_ldif, args) - except ValueError: - log.fatal('test_basic_import_export: Online export failed') - assert False - - # Offline export - if not topology.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,), - None, None, None, export_ldif): - log.fatal('test_basic_import_export: Failed to run offline db2ldif') - assert False - - # - # Cleanup - Import the Example LDIF for the other tests in this suite - # - import_ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix) - try: - topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, - input_file=import_ldif, - args={TASK_WAIT: True}) - except ValueError: - log.fatal('test_basic_import_export: Online import failed') - assert False - - log.info('test_basic_import_export: PASSED') - - -def test_basic_backup(topology, import_example_ldif): - """Test online and offline back and restore""" - - log.info('Running test_basic_backup...') - - 
backup_dir = '%sbasic_backup/' % topology.standalone.getDir(__file__, TMP_DIR) - - # Test online backup - try: - topology.standalone.tasks.db2bak(backup_dir=backup_dir, - args={TASK_WAIT: True}) - except ValueError: - log.fatal('test_basic_backup: Online backup failed') - assert False - - # Test online restore - try: - topology.standalone.tasks.bak2db(backup_dir=backup_dir, - args={TASK_WAIT: True}) - except ValueError: - log.fatal('test_basic_backup: Online restore failed') - assert False - - # Test offline backup - if not topology.standalone.db2bak(backup_dir): - log.fatal('test_basic_backup: Offline backup failed') - assert False - - # Test offline restore - if not topology.standalone.bak2db(backup_dir): - log.fatal('test_basic_backup: Offline backup failed') - assert False - - log.info('test_basic_backup: PASSED') - - -def test_basic_acl(topology, import_example_ldif): - """Run some basic access control(ACL) tests""" - - log.info('Running test_basic_acl...') - - DENY_ACI = ('(targetattr = "*") (version 3.0;acl "deny user";deny (all)' + - '(userdn = "ldap:///' + USER1_DN + '");)') - - # - # Add two users - # - try: - topology.standalone.add_s(Entry((USER1_DN, - {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'userpassword': PASSWORD}))) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN - + ': error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER2_DN, - {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'userpassword': PASSWORD}))) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN - + ': error ' + e.message['desc']) - assert False - - # - # Add an aci that denies USER1 from doing anything, - # and also set the default anonymous access - # - try: - topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)]) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.message['desc']) - assert False - - # - # Make sure USER1_DN can not search anything, but USER2_dn can... - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.message['desc']) - assert False - - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - '(uid=*)') - if entries: - log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!') - assert False - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Search suffix failed(as user1): ' + e.message['desc']) - assert False - - # Now try user2... 
Also check that userpassword is stripped out - try: - topology.standalone.simple_bind_s(USER2_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.message['desc']) - assert False - - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - '(uid=user1)') - if not entries: - log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix') - assert False - if entries[0].hasAttr('userpassword'): - # The default anonymous access aci should have stripped out userpassword - log.fatal('test_basic_acl: User2 was incorrectly able to see userpassword') - assert False - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.message['desc']) - assert False - - # Make sure Root DN can also search (this also resets the bind dn to the - # Root DN for future operations) - try: - topology.standalone.simple_bind_s(DN_DM, PW_DM) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to bind as ROotDN, error: ' + e.message['desc']) - assert False - - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - '(uid=*)') - if not entries: - log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix') - assert False - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.message['desc']) - assert False - - # - # Cleanup - # - try: - topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)]) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.message['desc']) - assert False - - try: - topology.standalone.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.message['desc']) - assert False - - try: - topology.standalone.delete_s(USER2_DN) - except ldap.LDAPError as e: - log.fatal('test_basic_acl: Failed to delete test entry2: ' + e.message['desc']) - assert False - - log.info('test_basic_acl: PASSED') - - -def test_basic_searches(topology, import_example_ldif): - """The search results are gathered from testing with Example.ldif""" - - log.info('Running test_basic_searches...') - - filters = (('(uid=scarter)', 1), - ('(uid=tmorris*)', 1), - ('(uid=*hunt*)', 4), - ('(uid=*cope)', 2), - ('(mail=*)', 150), - ('(roomnumber>=4000)', 35), - ('(roomnumber<=4000)', 115), - ('(&(roomnumber>=4000)(roomnumber<=4500))', 18), - ('(!(l=sunnyvale))', 120), - ('(&(uid=t*)(l=santa clara))', 7), - ('(|(uid=k*)(uid=r*))', 18), - ('(|(uid=t*)(l=sunnyvale))', 50), - ('(&(!(uid=r*))(ou=people))', 139), - ('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3), - ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5), - ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4)) - - for (search_filter, search_result) in filters: - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - search_filter) - if len(entries) != search_result: - log.fatal('test_basic_searches: An incorrect number of entries\ - was returned from filter (%s): (%d) expected (%d)' % - (search_filter, len(entries), search_result)) - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - log.info('test_basic_searches: PASSED') - - -def test_basic_referrals(topology, import_example_ldif): - """Set the server to referral mode, - and make sure we recive the referal error(10) - """ - - 
log.info('Running test_basic_referrals...') - - SUFFIX_CONFIG = 'cn="dc=example,dc=com",cn=mapping tree,cn=config' - - # - # Set the referral, adn the backend state - # - try: - topology.standalone.modify_s(SUFFIX_CONFIG, - [(ldap.MOD_REPLACE, - 'nsslapd-referral', - 'ldap://localhost.localdomain:389/o%3dnetscaperoot')]) - except ldap.LDAPError as e: - log.fatal('test_basic_referrals: Failed to set referral: error ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, - 'nsslapd-state', 'Referral')]) - except ldap.LDAPError as e: - log.fatal('test_basic_referrals: Failed to set backend state: error ' - + e.message['desc']) - assert False - - # - # Test that a referral error is returned - # - topology.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral - try: - topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') - except ldap.REFERRAL: - pass - except ldap.LDAPError as e: - log.fatal('test_basic_referrals: Search failed: ' + e.message['desc']) - assert False - - # - # Make sure server can restart in referral mode - # - topology.standalone.restart(timeout=10) - - # - # Cleanup - # - try: - topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, - 'nsslapd-state', 'Backend')]) - except ldap.LDAPError as e: - log.fatal('test_basic_referrals: Failed to set backend state: error ' - + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE, - 'nsslapd-referral', None)]) - except ldap.LDAPError as e: - log.fatal('test_basic_referrals: Failed to delete referral: error ' - + e.message['desc']) - assert False - topology.standalone.set_option(ldap.OPT_REFERRALS, 1) - - log.info('test_basic_referrals: PASSED') - - -def test_basic_systemctl(topology, import_example_ldif): - """Test systemctl can stop and start the server. Also test that start reports an - error when the instance does not start. 
Only for RPM builds - """ - - log.info('Running test_basic_systemctl...') - - # We can only use systemctl on RPM installations - if topology.standalone.prefix and topology.standalone.prefix != '/': - return - - data_dir = topology.standalone.getDir(__file__, DATA_DIR) - tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) - config_dir = topology.standalone.confdir - start_ds = 'sudo systemctl start dirsrv@' + topology.standalone.serverid + '.service' - stop_ds = 'sudo systemctl stop dirsrv@' + topology.standalone.serverid + '.service' - is_running = 'sudo systemctl is-active dirsrv@' + topology.standalone.serverid + '.service' - - # - # Stop the server - # - log.info('Stopping the server...') - rc = os.system(stop_ds) - log.info('Check the status...') - if rc != 0 or os.system(is_running) == 0: - log.fatal('test_basic_systemctl: Failed to stop the server') - assert False - log.info('Stopped the server.') - - # - # Start the server - # - log.info('Starting the server...') - rc = os.system(start_ds) - log.info('Check the status...') - if rc != 0 or os.system(is_running) != 0: - log.fatal('test_basic_systemctl: Failed to start the server') - assert False - log.info('Started the server.') - - # - # Stop the server, break the dse.ldif so a start fails, - # and verify that systemctl detects the failed start - # - log.info('Stopping the server...') - rc = os.system(stop_ds) - log.info('Check the status...') - if rc != 0 or os.system(is_running) == 0: - log.fatal('test_basic_systemctl: Failed to stop the server') - assert False - log.info('Stopped the server before breaking the dse.ldif.') - - shutil.copy(config_dir + '/dse.ldif', tmp_dir) - shutil.copy(data_dir + 'basic/dse.ldif.broken', config_dir + '/dse.ldif') - - log.info('Attempting to start the server with broken dse.ldif...') - rc = os.system(start_ds) - log.info('Check the status...') - if rc == 0 or os.system(is_running) == 0: - log.fatal('test_basic_systemctl: The server incorrectly started') - assert False - log.info('Server failed to start as expected') - time.sleep(5) - - # - # Fix the dse.ldif, and make sure the server starts up, - # and systemctl correctly identifies the successful start - # - shutil.copy(tmp_dir + 'dse.ldif', config_dir) - log.info('Starting the server with good dse.ldif...') - rc = os.system(start_ds) - time.sleep(5) - log.info('Check the status...') - if rc != 0 or os.system(is_running) != 0: - log.fatal('test_basic_systemctl: Failed to start the server') - assert False - log.info('Server started after fixing dse.ldif.') - time.sleep(1) - - log.info('test_basic_systemctl: PASSED') - - -def test_basic_ldapagent(topology, import_example_ldif): - """Test that the ldap agent starts""" - - log.info('Running test_basic_ldapagent...') - - tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) - var_dir = topology.standalone.prefix + '/var' - config_file = tmp_dir + '/agent.conf' - cmd = 'sudo %s/ldap-agent %s' % (get_sbin_dir(prefix=topology.standalone.prefix), - config_file) - - agent_config_file = open(config_file, 'w') - agent_config_file.write('agentx-master ' + var_dir + '/agentx/master\n') - agent_config_file.write('agent-logdir ' + var_dir + '/log/dirsrv\n') - agent_config_file.write('server slapd-' + topology.standalone.serverid + '\n') - agent_config_file.close() - - rc = os.system(cmd) - if rc != 0: - log.fatal('test_basic_ldapagent: Failed to start snmp ldap agent: error %d' % rc) - assert False - - log.info('snmp ldap agent started') - - # - # Cleanup - kill the agent - # - pid = check_output(['pidof', 
'-s', 'ldap-agent-bin']) - log.info('Cleanup - killing agent: ' + pid) - rc = os.system('sudo kill -9 ' + pid) - - log.info('test_basic_ldapagent: PASSED') - - -def test_basic_dse(topology, import_example_ldif): - """Test that the dse.ldif is not wipped out - after the process is killed (bug 910581) - """ - - log.info('Running test_basic_dse...') - - dse_file = topology.standalone.confdir + '/dse.ldif' - pid = check_output(['pidof', '-s', 'ns-slapd']) - os.system('sudo kill -9 ' + pid) - if os.path.getsize(dse_file) == 0: - log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!') - assert False - - topology.standalone.start(timeout=10) - log.info('dse.ldif was not corrupted, and the server was restarted') - - log.info('test_basic_dse: PASSED') - - -@pytest.mark.parametrize("rootdse_attr_name", ROOTDSE_DEF_ATTR_LIST) -def test_def_rootdse_attr(topology, import_example_ldif, rootdse_attr_name): - """Tests that operational attributes - are not returned by default in rootDSE searches - """ - - log.info(" Assert rootdse search hasn't %s attr" % rootdse_attr_name) - try: - entries = topology.standalone.search_s("", ldap.SCOPE_BASE) - entry = str(entries[0]) - assert rootdse_attr_name not in entry - - except ldap.LDAPError as e: - log.fatal('Search failed, error: ' + e.message['desc']) - assert False - - -def test_mod_def_rootdse_attr(topology, import_example_ldif, rootdse_attr): - """Tests that operational attributes are returned - by default in rootDSE searches after config modification - """ - - log.info(" Assert rootdse search has %s attr" % rootdse_attr) - try: - entries = topology.standalone.search_s("", ldap.SCOPE_BASE) - entry = str(entries[0]) - assert rootdse_attr in entry - - except ldap.LDAPError as e: - log.fatal('Search failed, error: ' + e.message['desc']) - assert False - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/suites/betxns/betxn_test.py b/dirsrvtests/suites/betxns/betxn_test.py deleted file mode 100644 index 7bef791..0000000 --- a/dirsrvtests/suites/betxns/betxn_test.py +++ /dev/null @@ -1,258 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import six -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
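# basic_test.py drives the rootDSE checks above from a single attribute list:
# a plain @pytest.mark.parametrize run asserts each operational attribute is
# absent from a default rootDSE search, while a parametrized fixture adds
# nsslapd-return-default-opattr one attribute at a time (with a finalizer to
# remove it) so the companion test can assert presence. Sketch of the fixture
# half, with a shortened attribute list for brevity.
import ldap
import pytest

RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr"

@pytest.fixture(params=('namingContexts', 'vendorName', 'vendorVersion'))
def rootdse_attr(topology, request):
    attr = request.param
    # Ask the server to return this operational attribute by default ...
    topology.standalone.modify_s("", [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, attr)])

    def fin():
        # ... and undo it once the parametrized test for this attr is done.
        topology.standalone.modify_s("", [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, attr)])
    request.addfinalizer(fin)

    return attr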
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_betxn_init(topology): - # First enable dynamic plugins - makes plugin testing much easier - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - ldap.error('Failed to enable dynamic plugin!' + e.message['desc']) - assert False - - -def test_betxt_7bit(topology): - ''' - Test that the 7-bit plugin correctly rejects an invlaid update - ''' - - log.info('Running test_betxt_7bit...') - - USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX - eight_bit_rdn = six.u('uid=Fu\u00c4\u00e8') - BAD_RDN = eight_bit_rdn.encode('utf-8') - - # This plugin should on by default, but just in case... - topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) - - # Add our test user - try: - topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'test 1', - 'uid': 'test_entry', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.error('Failed to add test user' + USER_DN + ': error ' + e.message['desc']) - assert False - - # Attempt a modrdn, this should fail - try: - topology.standalone.rename_s(USER_DN, BAD_RDN, delold=0) - log.fatal('test_betxt_7bit: Modrdn operation incorrectly succeeded') - assert False - except ldap.LDAPError as e: - log.info('Modrdn failed as expected: error ' + e.message['desc']) - - # Make sure the operation did not succeed, attempt to search for the new RDN - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, BAD_RDN) - if entries: - log.fatal('test_betxt_7bit: Incorrectly found the entry using the invalid RDN') - assert False - except ldap.LDAPError as e: - log.fatal('Error whiles earching for test entry: ' + e.message['desc']) - assert False - - # - # Cleanup - remove the user - # - try: - topology.standalone.delete_s(USER_DN) - except ldap.LDAPError as e: - log.fatal('Failed to delete test entry: ' + e.message['desc']) - assert False - - log.info('test_betxt_7bit: PASSED') - - -def test_betxn_attr_uniqueness(topology): - ''' - Test that we can not add two entries that have the same attr value that is - defined by the plugin. 
- ''' - - log.info('Running test_betxn_attr_uniqueness...') - - USER1_DN = 'uid=test_entry1,' + DEFAULT_SUFFIX - USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX - - topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) - - # Add the first entry - try: - topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'test 1', - 'uid': 'test_entry1', - 'userpassword': 'password1'}))) - except ldap.LDAPError as e: - log.fatal('test_betxn_attr_uniqueness: Failed to add test user: ' + - USER1_DN + ', error ' + e.message['desc']) - assert False - - # Add the second entry with a dupliate uid - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'test 2', - 'uid': 'test_entry2', - 'uid': 'test_entry1', # Duplicate value - 'userpassword': 'password2'}))) - log.fatal('test_betxn_attr_uniqueness: The second entry was incorrectly added.') - assert False - except ldap.LDAPError as e: - log.error('test_betxn_attr_uniqueness: Failed to add test user as expected: ' + - USER1_DN + ', error ' + e.message['desc']) - - # - # Cleanup - disable plugin, remove test entry - # - topology.standalone.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS) - - try: - topology.standalone.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_betxn_attr_uniqueness: Failed to delete test entry1: ' + - e.message['desc']) - assert False - - log.info('test_betxn_attr_uniqueness: PASSED') - - -def test_betxn_memberof(topology): - ENTRY1_DN = 'cn=group1,' + DEFAULT_SUFFIX - ENTRY2_DN = 'cn=group2,' + DEFAULT_SUFFIX - PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' - - # Enable and configure memberOf plugin - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')]) - except ldap.LDAPError as e: - log.fatal('test_betxn_memberof: Failed to update config(member): error ' + e.message['desc']) - assert False - - # Add our test entries - try: - topology.standalone.add_s(Entry((ENTRY1_DN, {'objectclass': "top groupofnames".split(), - 'cn': 'group1'}))) - except ldap.LDAPError as e: - log.error('test_betxn_memberof: Failed to add group1:' + - ENTRY1_DN + ', error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((ENTRY2_DN, {'objectclass': "top groupofnames".split(), - 'cn': 'group1'}))) - except ldap.LDAPError as e: - log.error('test_betxn_memberof: Failed to add group2:' + - ENTRY2_DN + ', error ' + e.message['desc']) - assert False - - # - # Test mod replace - # - - # Add group2 to group1 - it should fail with objectclass violation - try: - topology.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_REPLACE, 'member', ENTRY2_DN)]) - log.fatal('test_betxn_memberof: Group2 was incorrectly allowed to be added to group1') - assert False - except ldap.LDAPError as e: - log.info('test_betxn_memberof: Group2 was correctly rejected (mod replace): error ' + e.message['desc']) - - # - # Test mod add - # - - # Add group2 to group1 - it should fail with objectclass violation - try: - topology.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_ADD, 'member', ENTRY2_DN)]) - log.fatal('test_betxn_memberof: Group2 was incorrectly allowed to be added to group1') - assert False - except ldap.LDAPError as e: - log.info('test_betxn_memberof: Group2 was correctly rejected (mod add): error ' + e.message['desc']) - - # - # Done - # - - log.info('test_betxn_memberof: PASSED') - - -def 
test_betxn_final(topology): - topology.standalone.delete() - log.info('betxn test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_betxn_init(topo) - test_betxt_7bit(topo) - test_betxn_attr_uniqueness(topo) - test_betxn_memberof(topo) - test_betxn_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/chaining_plugin/chaining_test.py b/dirsrvtests/suites/chaining_plugin/chaining_test.py deleted file mode 100644 index 50eed9a..0000000 --- a/dirsrvtests/suites/chaining_plugin/chaining_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_chaining_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_chaining_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_chaining_final(topology): - topology.standalone.delete() - log.info('chaining test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_chaining_init(topo) - test_chaining_(topo) - test_chaining_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/clu/clu_test.py b/dirsrvtests/suites/clu/clu_test.py deleted file mode 100644 index 4f2804f..0000000 --- a/dirsrvtests/suites/clu/clu_test.py +++ /dev/null @@ -1,115 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_clu_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_clu_pwdhash(topology): - ''' - Test the pwdhash script - ''' - - log.info('Running test_clu_pwdhash...') - - cmd = 'pwdhash -s ssha testpassword' - - p = os.popen(cmd) - result = p.readline() - p.close() - - if not result: - log.fatal('test_clu_pwdhash: Failed to run pwdhash') - assert False - - if len(result) < 20: - log.fatal('test_clu_pwdhash: Encrypted password is too short') - assert False - - log.info('pwdhash generated: ' + result) - log.info('test_clu_pwdhash: PASSED') - - -def test_clu_final(topology): - topology.standalone.delete() - log.info('clu test suite PASSED') - - -def run_isolated(): - ''' - This test is for the simple scripts that don't have a lot of options or - points of failure. Scripts that do, should have their own individual tests. - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_clu_init(topo) - - test_clu_pwdhash(topo) - - test_clu_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/clu/db2ldif_test.py b/dirsrvtests/suites/clu/db2ldif_test.py deleted file mode 100644 index dbfb0d0..0000000 --- a/dirsrvtests/suites/clu/db2ldif_test.py +++ /dev/null @@ -1,92 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_db2ldif_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_db2ldif_final(topology): - topology.standalone.delete() - log.info('db2ldif test suite PASSED') - - -def run_isolated(): - ''' - Test db2lidf/db2ldif.pl - test/stress functionality, all the command line options, - valid/invalid option combinations, etc, etc. - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_db2ldif_init(topo) - - # test 1 function... - # test 2 function... - # ... - - test_db2ldif_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/collation_plugin/collatation_test.py b/dirsrvtests/suites/collation_plugin/collatation_test.py deleted file mode 100644 index 1a918c6..0000000 --- a/dirsrvtests/suites/collation_plugin/collatation_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
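# test_clu_pwdhash above shells out through os.popen and only checks that a
# sufficiently long hash came back. An equivalent sketch using subprocess (a
# drop-in alternative, not what the suite itself does) that also checks the
# scheme tag pwdhash prints for "-s ssha":
from subprocess import check_output

def pwdhash_ok(password='testpassword'):
    out = check_output(['pwdhash', '-s', 'ssha', password]).strip()
    # Same minimum-length check as the test above, plus the {SSHA} scheme
    # prefix that pwdhash emits for the ssha storage scheme.
    return len(out) >= 20 and out.startswith('{SSHA}')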
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_collatation_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_collatation_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_collatation_final(topology): - topology.standalone.delete() - log.info('collatation test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_collatation_init(topo) - test_collatation_(topo) - test_collatation_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/config/config_test.py b/dirsrvtests/suites/config/config_test.py deleted file mode 100644 index d3631e3..0000000 --- a/dirsrvtests/suites/config/config_test.py +++ /dev/null @@ -1,198 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_config_init(topology): - ''' - Initialization function - ''' - return - - -def test_config_listen_backport_size(topology): - ''' - We need to check that we can search on nsslapd-listen-backlog-size, - and change its value: to a psoitive number and a negative number. - Verify invalid value is rejected. 
- ''' - - log.info('Running test_config_listen_backport_size...') - - try: - entry = topology.standalone.search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top', - ['nsslapd-listen-backlog-size']) - default_val = entry[0].getValue('nsslapd-listen-backlog-size') - if not default_val: - log.fatal('test_config_listen_backport_size: Failed to get nsslapd-listen-backlog-size from config') - assert False - except ldap.LDAPError as e: - log.fatal('test_config_listen_backport_size: Failed to search config, error: ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '256')]) - except ldap.LDAPError as e: - log.fatal('test_config_listen_backport_size: Failed to modify config, error: ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '-1')]) - except ldap.LDAPError as e: - log.fatal('test_config_listen_backport_size: Failed to modify config(negative value), error: ' + - e.message['desc']) - assert False - - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', 'ZZ')]) - log.fatal('test_config_listen_backport_size: Invalid value was successfully added') - assert False - except ldap.LDAPError as e: - pass - - # - # Cleanup - undo what we've done - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', default_val)]) - except ldap.LDAPError as e: - log.fatal('test_config_listen_backport_size: Failed to reset config, error: ' + e.message['desc']) - assert False - - log.info('test_config_listen_backport_size: PASSED') - - -def test_config_deadlock_policy(topology): - ''' - We need to check that nsslapd-db-deadlock-policy exists, that we can - change the value, and invalid values are rejected - ''' - - log.info('Running test_config_deadlock_policy...') - - LDBM_DN = 'cn=config,cn=ldbm database,cn=plugins,cn=config' - default_val = '9' - - try: - entry = topology.standalone.search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top', - ['nsslapd-db-deadlock-policy']) - val = entry[0].getValue('nsslapd-db-deadlock-policy') - if not val: - log.fatal('test_config_deadlock_policy: Failed to get nsslapd-db-deadlock-policy from config') - assert False - if val != default_val: - log.fatal('test_config_deadlock_policy: The wrong default value was present: (%s) but expected (%s)' % - (val, default_val)) - assert False - except ldap.LDAPError as e: - log.fatal('test_config_deadlock_policy: Failed to search config, error: ' + e.message['desc']) - assert False - - # Try a range of valid values - for val in ('0', '5', '9'): - try: - topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)]) - except ldap.LDAPError as e: - log.fatal('test_config_deadlock_policy: Failed to modify config: nsslapd-db-deadlock-policy to (%s), error: %s' % - (val, e.message['desc'])) - assert False - - # Try a range of invalid values - for val in ('-1', '10'): - try: - topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)]) - log.fatal('test_config_deadlock_policy: Able to add invalid value to nsslapd-db-deadlock-policy(%s)' % (val)) - assert False - except ldap.LDAPError as e: - pass - # - # Cleanup - undo what we've done - # - try: - topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', default_val)]) - except ldap.LDAPError as e: - log.fatal('test_config_deadlock_policy: Failed
to reset nsslapd-db-deadlock-policy to the default value(%s), error: %s' % - (default_val, e.message['desc'])) - - log.info('test_config_deadlock_policy: PASSED') - - -def test_config_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - This test suite is designed to test all things cn=config, like the core cn=config settings, - or the ldbm database settings, etc. This suite should not test individual plugins - there - should be individual suites for each plugin. - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_config_init(topo) - - test_config_listen_backport_size(topo) - test_config_deadlock_policy(topo) - - test_config_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/cos_plugin/cos_test.py b/dirsrvtests/suites/cos_plugin/cos_test.py deleted file mode 100644 index 2dc3ac9..0000000 --- a/dirsrvtests/suites/cos_plugin/cos_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_cos_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_cos_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_cos_final(topology): - topology.standalone.delete() - log.info('cos test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_cos_init(topo) - test_cos_(topo) - test_cos_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/deref_plugin/deref_test.py b/dirsrvtests/suites/deref_plugin/deref_test.py deleted file mode 100644 index 9beaa38..0000000 --- a/dirsrvtests/suites/deref_plugin/deref_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details.
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_deref_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_deref_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_deref_final(topology): - topology.standalone.delete() - log.info('deref test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_deref_init(topo) - test_deref_(topo) - test_deref_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/disk_monitoring/disk_monitor_test.py b/dirsrvtests/suites/disk_monitoring/disk_monitor_test.py deleted file mode 100644 index 0b84c54..0000000 --- a/dirsrvtests/suites/disk_monitoring/disk_monitor_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_disk_monitor_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_disk_monitor_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_disk_monitor_final(topology): - topology.standalone.delete() - log.info('disk_monitor test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_disk_monitor_init(topo) - test_disk_monitor_(topo) - test_disk_monitor_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/distrib_plugin/distrib_test.py b/dirsrvtests/suites/distrib_plugin/distrib_test.py deleted file mode 100644 index ab1cf87..0000000 --- a/dirsrvtests/suites/distrib_plugin/distrib_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_distrib_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_distrib_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_distrib_final(topology): - topology.standalone.delete() - log.info('distrib test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_distrib_init(topo) - test_distrib_(topo) - test_distrib_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/dna_plugin/dna_test.py b/dirsrvtests/suites/dna_plugin/dna_test.py deleted file mode 100644 index 6b0ab8b..0000000 --- a/dirsrvtests/suites/dna_plugin/dna_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_dna_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_dna_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_dna_final(topology): - topology.standalone.delete() - log.info('dna test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_dna_init(topo) - test_dna_(topo) - test_dna_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/suites/ds_logs/ds_logs_test.py deleted file mode 100644 index 9d870d4..0000000 --- a/dirsrvtests/suites/ds_logs/ds_logs_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ds_logs_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_ds_logs_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_ds_logs_final(topology): - topology.standalone.delete() - log.info('ds_logs test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ds_logs_init(topo) - test_ds_logs_(topo) - test_ds_logs_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py deleted file mode 100644 index 30dfa88..0000000 --- a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py +++ /dev/null @@ -1,2406 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -''' -Created on Dec 09, 2014 - -@author: mreynolds -''' -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX -USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX -USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX -BUSER1_DN = 'uid=user1,ou=branch1,' + DEFAULT_SUFFIX -BUSER2_DN = 'uid=user2,ou=branch2,' + DEFAULT_SUFFIX -BUSER3_DN = 'uid=user3,ou=branch2,' + DEFAULT_SUFFIX -BRANCH1_DN = 'ou=branch1,' + DEFAULT_SUFFIX -BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX -GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX -PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX -GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX -CONFIG_AREA = 'nsslapd-pluginConfigArea' - -''' - Functional tests for each plugin - - Test: - plugin restarts (test when on and off) - plugin config validation - plugin dependencies - plugin functionality (including plugin tasks) -''' - - -################################################################################ -# -# Test Plugin Dependency -# -################################################################################ -def test_dependency(inst, plugin): - """ - Set the "account usability" plugin to depend on this plugin. This plugin - is generic, always enabled, and perfect for our testing - """ - - try: - inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'nsslapd-plugin-depends-on-named', plugin)]) - - except ldap.LDAPError as e: - log.fatal('test_dependency: Failed to modify ' + PLUGIN_ACCT_USABILITY + ': error ' + e.message['desc']) - assert False - - try: - inst.modify_s('cn=' + plugin + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'off')]) - - except ldap.UNWILLING_TO_PERFORM: - # failed as expected - pass - else: - # Incorrectly succeeded - log.fatal('test_dependency: Plugin dependency check failed (%s)' % plugin) - assert False - - # Now undo the change - try: - inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config', - [(ldap.MOD_DELETE, 'nsslapd-plugin-depends-on-named', None)]) - except ldap.LDAPError as e: - log.fatal('test_dependency: Failed to reset ' + plugin + ': error ' + e.message['desc']) - assert False - - -################################################################################ -# -# Wait for task to complete -# -################################################################################ -def wait_for_task(conn, task_dn): - finished = False - count = 0 - while count < 60: - try: - task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*') - if not task_entry: - log.fatal('wait_for_task: Search failed to find task: ' + task_dn) - assert False - if task_entry[0].hasAttr('nstaskexitcode'): - # task is done - finished = True - break - except ldap.LDAPError as e: - log.fatal('wait_for_task: Search failed: ' + e.message['desc']) - assert False - - time.sleep(1) - count += 1 - if not finished: - log.fatal('wait_for_task: Task (%s) did not complete!'
% task_dn) - assert False - - -################################################################################ -# -# Test Account Policy Plugin (0) -# -################################################################################ -def test_acctpolicy(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_ACCT_POLICY) - inst.plugins.enable(name=PLUGIN_ACCT_POLICY) - - if args == "restart": - return True - - CONFIG_DN = 'cn=config,cn=Account Policy Plugin,cn=plugins,cn=config' - - log.info('Testing ' + PLUGIN_ACCT_POLICY + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - # Add the config entry - try: - inst.add_s(Entry((CONFIG_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'config', - 'alwaysrecordlogin': 'yes', - 'stateattrname': 'lastLoginTime' - }))) - except ldap.ALREADY_EXISTS: - try: - inst.modify_s(CONFIG_DN, - [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), - (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')]) - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc']) - assert False - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to add config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add an entry - time.sleep(1) - try: - inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) - assert False - - # bind as user - try: - inst.simple_bind_s(USER1_DN, "password") - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to bind as user1: ' + e.message['desc']) - assert False - - # Bind as Root DN - time.sleep(1) - try: - inst.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc']) - assert False - - # Check lastLoginTime of USER1 - try: - entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'lastLoginTime=*') - if not entries: - log.fatal('test_acctpolicy: Search failed to find an entry with lastLoginTime.') - assert False - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Search failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Change config - change the stateAttrName to a new attribute - ############################################################################ - - try: - inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'testLastLoginTime')]) - - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - time.sleep(1) - # login as user - try: - inst.simple_bind_s(USER1_DN, "password") - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc']) - assert False - - time.sleep(1) - # Bind as Root DN 
- try: - inst.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc']) - assert False - - # Check testLastLoginTime was added to USER1 - try: - entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(testLastLoginTime=*)') - if not entries: - log.fatal('test_acctpolicy: Search failed to find an entry with testLastLoginTime.') - assert False - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Search failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_ACCT_POLICY) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_acctpolicy: Failed to delete test entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_acctpolicy: PASS\n') - - return - - -################################################################################ -# -# Test Attribute Uniqueness Plugin (1) -# -################################################################################ -def test_attruniq(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS) - inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) - - if args == "restart": - return - - log.info('Testing ' + PLUGIN_ATTR_UNIQUENESS + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - try: - inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')]) - - except ldap.LDAPError as e: - log.fatal('test_attruniq: Failed to configure plugin for "uid": error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add an entry - try: - inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'mail': 'user1@example.com', - 'mailAlternateAddress' : 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.fatal('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) - assert False - - # Add an entry with a duplicate "uid" - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'uid': 'user1', - 'userpassword': 'password'}))) - - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.fatal('test_attruniq: Adding of 2nd entry(uid) incorrectly succeeded') - assert False - - ############################################################################ - # Change config to use "mail" instead of "uid" - ############################################################################ - - try: - inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - 
[(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')]) - - except ldap.LDAPError as e: - log.fatal('test_attruniq: Failed to configure plugin for "mail": error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - Add an entry, that has a duplicate "mail" value - ############################################################################ - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.fatal('test_attruniq: Adding of 2nd entry(mail) incorrectly succeeded') - assert False - - ############################################################################ - # Reconfigure plugin for mail and mailAlternateAddress - ############################################################################ - - try: - inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'), - (ldap.MOD_ADD, 'uniqueness-attribute-name', - 'mailAlternateAddress')]) - - except ldap.LDAPError as e: - log.error('test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - Add an entry, that has a duplicate "mail" value - ############################################################################ - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attruniq: Adding of 3rd entry(mail) incorrectly succeeded') - assert False - - ############################################################################ - # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value - ############################################################################ - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mailAlternateAddress': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attruniq: Adding of 4th entry(mailAlternateAddress) incorrectly succeeded') - assert False - - ############################################################################ - # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress - ############################################################################ - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attruniq: Adding of 5th entry(mailAlternateAddress) incorrectly succeeded') - assert False - - ############################################################################ - # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail - ############################################################################ - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 
2', - 'uid': 'user2', - 'mailAlternateAddress': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_attruniq: Adding of 6th entry(mail) incorrectly succeeded') - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_ATTR_UNIQUENESS) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_attruniq: Failed to delete test entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_attruniq: PASS\n') - return - - -################################################################################ -# -# Test Auto Membership Plugin (2) -# -################################################################################ -def test_automember(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_AUTOMEMBER) - inst.plugins.enable(name=PLUGIN_AUTOMEMBER) - - if args == "restart": - return - - CONFIG_DN = 'cn=config,cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config' - - log.info('Testing ' + PLUGIN_AUTOMEMBER + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - # Add the automember group - try: - inst.add_s(Entry((GROUP_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'group' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add group: error ' + e.message['desc']) - assert False - - # Add ou=branch1 - try: - inst.add_s(Entry((BRANCH1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'branch1' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add branch1: error ' + e.message['desc']) - assert False - - # Add ou=branch2 - try: - inst.add_s(Entry((BRANCH2_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'branch2' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add branch2: error ' + e.message['desc']) - assert False - - # Add the automember config entry - try: - inst.add_s(Entry((CONFIG_DN, { - 'objectclass': 'top autoMemberDefinition'.split(), - 'cn': 'config', - 'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX, - 'autoMemberFilter': 'objectclass=top', - 'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX, - 'autoMemberGroupingAttr': 'member:dn' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test the plugin - ############################################################################ - - # Add a user that should get added to the group - try: - inst.add_s(Entry((BUSER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add user: error ' + e.message['desc']) - assert False - - # Check the group - try: - entries = 
inst.search_s(GROUP_DN, ldap.SCOPE_BASE, - '(member=' + BUSER1_DN + ')') - if not entries: - log.fatal('test_automember: Search failed to find member user1') - assert False - except ldap.LDAPError as e: - log.fatal('test_automember: Search failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Change config - ############################################################################ - - try: - inst.modify_s(CONFIG_DN, - [(ldap.MOD_REPLACE, 'autoMemberGroupingAttr', 'uniquemember:dn'), - (ldap.MOD_REPLACE, 'autoMemberScope', 'ou=branch2,' + DEFAULT_SUFFIX)]) - - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to modify config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add a user that should get added to the group - try: - inst.add_s(Entry((BUSER2_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user2' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add user to branch2: error ' + e.message['desc']) - assert False - - # Check the group - try: - entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, - '(uniquemember=' + BUSER2_DN + ')') - if not entries: - log.fatal('test_automember: Search failed to find uniquemember user2') - assert False - except ldap.LDAPError as e: - log.fatal('test_automember: Search failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test Task - ############################################################################ - - # Disable plugin - inst.plugins.disable(name=PLUGIN_AUTOMEMBER) - - # Add an entry that should be picked up by automember - verify it is not(yet) - try: - inst.add_s(Entry((BUSER3_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user3' - }))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add user3 to branch2: error ' + e.message['desc']) - assert False - - # Check the group - uniquemember should not exist - try: - entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, - '(uniquemember=' + BUSER3_DN + ')') - if entries: - log.fatal('test_automember: user3 was incorrectly added to the group') - assert False - except ldap.LDAPError as e: - log.fatal('test_automember: Search failed: ' + e.message['desc']) - assert False - - # Enable plugin - inst.plugins.enable(name=PLUGIN_AUTOMEMBER) - - TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config' - # Add the task - try: - inst.add_s(Entry((TASK_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'basedn': 'ou=branch2,' + DEFAULT_SUFFIX, - 'filter': 'objectclass=top'}))) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to add task: error ' + e.message['desc']) - assert False - - wait_for_task(inst, TASK_DN) - - # Verify the fixup task worked - try: - entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, - '(uniquemember=' + BUSER3_DN + ')') - if not entries: - log.fatal('test_automember: user3 was not added to the group') - assert False - except ldap.LDAPError as e: - log.fatal('test_automember: Search failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency -
############################################################################ - - test_dependency(inst, PLUGIN_AUTOMEMBER) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(BUSER1_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete test entry1: ' + e.message['desc']) - assert False - - try: - inst.delete_s(BUSER2_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete test entry2: ' + e.message['desc']) - assert False - - try: - inst.delete_s(BUSER3_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete test entry3: ' + e.message['desc']) - assert False - - try: - inst.delete_s(BRANCH1_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete branch1: ' + e.message['desc']) - assert False - - try: - inst.delete_s(BRANCH2_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete test branch2: ' + e.message['desc']) - assert False - - try: - inst.delete_s(GROUP_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete test group: ' + e.message['desc']) - assert False - - try: - inst.delete_s(CONFIG_DN) - except ldap.LDAPError as e: - log.fatal('test_automember: Failed to delete plugin config entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_automember: PASS\n') - return - - -################################################################################ -# -# Test DNA Plugin (3) -# -################################################################################ -def test_dna(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_DNA) - inst.plugins.enable(name=PLUGIN_DNA) - - if args == "restart": - return - - CONFIG_DN = 'cn=config,cn=' + PLUGIN_DNA + ',cn=plugins,cn=config' - - log.info('Testing ' + PLUGIN_DNA + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - try: - inst.add_s(Entry((CONFIG_DN, { - 'objectclass': 'top dnaPluginConfig'.split(), - 'cn': 'config', - 'dnatype': 'uidNumber', - 'dnafilter': '(objectclass=top)', - 'dnascope': DEFAULT_SUFFIX, - 'dnaMagicRegen': '-1', - 'dnaMaxValue': '50000', - 'dnaNextValue': '1' - }))) - except ldap.ALREADY_EXISTS: - try: - inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'), - (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')]) - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc']) - assert False - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to add config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to user1: error ' + e.message['desc']) - assert False - - # See if the entry now has the new uidNumber assignment - uidNumber=1 - try: - entries = inst.search_s(USER1_DN, 
ldap.SCOPE_BASE, '(uidNumber=1)') - if not entries: - log.fatal('test_dna: user1 was not updated - (looking for uidNumber: 1)') - assert False - except ldap.LDAPError as e: - log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) - assert False - - # Test the magic regen value - try: - inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')]) - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc']) - assert False - - # See if the entry now has the new uidNumber assignment - uidNumber=2 - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)') - if not entries: - log.fatal('test_dna: user1 was not updated (looking for uidNumber: 2)') - assert False - except ldap.LDAPError as e: - log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) - assert False - - ################################################################################ - # Change the config - ################################################################################ - - try: - inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')]) - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc']) - assert False - - ################################################################################ - # Test plugin - ################################################################################ - - # Test the magic regen value - try: - inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')]) - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc']) - assert False - - # See if the entry now has the new uidNumber assignment - uidNumber=3 - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)') - if not entries: - log.fatal('test_dna: user1 was not updated (looking for uidNumber: 3)') - assert False - except ldap.LDAPError as e: - log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_AUTOMEMBER) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_dna: Failed to delete test entry1: ' + e.message['desc']) - assert False - - inst.plugins.disable(name=PLUGIN_DNA) - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_dna: PASS\n') - - return - - -################################################################################ -# -# Test Linked Attrs Plugin (4) -# -################################################################################ -def test_linkedattrs(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_LINKED_ATTRS) - inst.plugins.enable(name=PLUGIN_LINKED_ATTRS) - - if args == "restart": - return - - CONFIG_DN = 'cn=config,cn=' + PLUGIN_LINKED_ATTRS + ',cn=plugins,cn=config' - - log.info('Testing ' + PLUGIN_LINKED_ATTRS + '...') - - ############################################################################ - # Configure plugin - 
############################################################################ - - # Add test entries - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add user1: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((USER2_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user2' - }))) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add user2: error ' + e.message['desc']) - assert False - - # Add the linked attrs config entry - try: - inst.add_s(Entry((CONFIG_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'config', - 'linkType': 'directReport', - 'managedType': 'manager' - }))) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Set "directReport" should add "manager" to the other entry - try: - inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'directReport', USER2_DN)]) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc']) - assert False - - # See if manager was added to the other entry - try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if not entries: - log.fatal('test_linkedattrs: user2 missing "manager" attribute') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) - assert False - - # Remove "directReport" should remove "manager" from the other entry - try: - inst.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'directReport', None)]) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to delete directReport: error ' + e.message['desc']) - assert False - - # See if manager was removed - try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if entries: - log.fatal('test_linkedattrs: user2 "manager" attribute not removed') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Change the config - using linkType "indirectReport" now - ############################################################################ - - try: - inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'linkType', 'indirectReport')]) - except ldap.LDAPError as e: - log.error('test_linkedattrs: Failed to set linkType: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Make sure the old linkType(directReport) is not working - try: - inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'directReport', USER2_DN)]) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc']) - assert False - - # See if manager was added to the other entry, better not be...
- try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if entries: - log.fatal('test_linkedattrs: user2 had "manager" added unexpectedly') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) - assert False - - # Now, set the new linkType "indirectReport", which should add "manager" to the other entry - try: - inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'indirectReport', USER2_DN)]) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc']) - assert False - - # See if manager was added to the other entry, it should be this time - try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if not entries: - log.fatal('test_linkedattrs: user2 missing "manager"') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) - assert False - - # Remove "indirectReport" should remove "manager" from the other entry - try: - inst.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'indirectReport', None)]) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to delete indirectReport: error ' + e.message['desc']) - assert False - - # See if manager was removed - try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if entries: - log.fatal('test_linkedattrs: user2 "manager" attribute not removed') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test Fixup Task - ############################################################################ - - # Disable plugin and make some updates that would have triggered the plugin - inst.plugins.disable(name=PLUGIN_LINKED_ATTRS) - - try: - inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'indirectReport', USER2_DN)]) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc']) - assert False - - # The entry should not have a manager attribute - try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if entries: - log.fatal('test_linkedattrs: user2 incorrectly has a "manager" attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) - assert False - - # Enable the plugin and rerun the task entry - inst.plugins.enable(name=PLUGIN_LINKED_ATTRS) - - # Add the task again - TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config' - try: - inst.add_s(Entry((TASK_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'basedn': DEFAULT_SUFFIX, - 'filter': 'objectclass=top'}))) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to add task: error ' + e.message['desc']) - assert False - - wait_for_task(inst, TASK_DN) - - # Check if user2 now has a manager attribute - try: - entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') - if not entries: - log.fatal('test_linkedattrs: task failed: user2 missing "manager" attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin
dependency - ############################################################################ - - test_dependency(inst, PLUGIN_LINKED_ATTRS) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to delete test entry1: ' + e.message['desc']) - assert False - - try: - inst.delete_s(USER2_DN) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to delete test entry2: ' + e.message['desc']) - assert False - - try: - inst.delete_s(CONFIG_DN) - except ldap.LDAPError as e: - log.fatal('test_linkedattrs: Failed to delete plugin config entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_linkedattrs: PASS\n') - return - - -################################################################################ -# -# Test MemberOf Plugin (5) -# -################################################################################ -def test_memberof(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_MEMBER_OF) - inst.plugins.enable(name=PLUGIN_MEMBER_OF) - - if args == "restart": - return - - PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' - SHARED_CONFIG_DN = 'cn=memberOf Config,' + DEFAULT_SUFFIX - - log.info('Testing ' + PLUGIN_MEMBER_OF + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to update config(member): error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add our test entries - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((GROUP_DN, { - 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), - 'cn': 'group', - 'member': USER1_DN - }))) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add group: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((SHARED_CONFIG_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'memberofgroupattr': 'member', - 'memberofattr': 'memberof' - }))) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to shared config entry: error ' + e.message['desc']) - assert False - - # Check if the user now has a "memberOf" attribute - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if not entries: - log.fatal('test_memberof: user1 missing memberOf') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - # Remove "member" should remove "memberOf" from the entry - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)]) - except ldap.LDAPError as e: - 
log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) - assert False - - # Check that "memberOf" was removed - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if entries: - log.fatal('test_memberof: user1 incorrectly has memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Change the config - ############################################################################ - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - # Check if the user now has a "memberOf" attribute - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if not entries: - log.fatal('test_memberof: user1 missing memberOf') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - # Remove "uniquemember" should remove "memberOf" from the entry - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) - assert False - - # Check that "memberOf" was removed - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if entries: - log.fatal('test_memberof: user1 incorrectly has memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Set the shared config entry and test the plugin - ############################################################################ - - # The shared config entry uses "member" - the above test uses "uniquemember" - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to set plugin area: error ' + e.message['desc']) - assert False - - # Delete the test entries then readd them to start with a clean slate - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete test entry1: ' + e.message['desc']) - assert False - - try: - inst.delete_s(GROUP_DN) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete test group: ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((GROUP_DN, { - 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), - 'cn': 'group', - 'member': USER1_DN - }))) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add 
group: error ' + e.message['desc']) - assert False - - # Test the shared config - # Check if the user now has a "memberOf" attribute - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if not entries: - log.fatal('test_memberof: user1 missing memberOf') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - # Remove "member" should remove "memberOf" from the entry - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) - assert False - - # Check that "memberOf" was removed - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if entries: - log.fatal('test_memberof: user1 incorrectly has memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Change the shared config entry to use 'uniquemember' and test the plugin - ############################################################################ - - try: - inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to set shared plugin entry(uniquemember): error ' - + e.message['desc']) - assert False - - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - # Check if the user now has a "memberOf" attribute - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if not entries: - log.fatal('test_memberof: user1 missing memberOf') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - # Remove "uniquemember" should remove "memberOf" from the entry - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) - assert False - - # Check that "memberOf" was removed - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if entries: - log.fatal('test_memberof: user1 incorrectly has memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Remove shared config from plugin, and retest - ############################################################################ - - # First change the plugin to use member before we move the shared config that uses uniquemember - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc']) - assert False - - # Remove shared config from plugin - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)]) - except ldap.LDAPError as e: - 
log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - # Check if the user now has a "memberOf" attribute - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if not entries: - log.fatal('test_memberof: user1 missing memberOf') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - # Remove "uniquemember" should remove "memberOf" from the entry - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) - assert False - - # Check that "memberOf" was removed - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if entries: - log.fatal('test_memberof: user1 incorrectly has memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test Fixup Task - ############################################################################ - - inst.plugins.disable(name=PLUGIN_MEMBER_OF) - - # First change the plugin to use uniquemember - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc']) - assert False - - # Add uniquemember, should not update USER1 - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)]) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - # Check for "memberOf" - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if entries: - log.fatal('test_memberof: user1 incorrect has memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - # Enable the plugin, and run the task - inst.plugins.enable(name=PLUGIN_MEMBER_OF) - - TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK - try: - inst.add_s(Entry((TASK_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'basedn': DEFAULT_SUFFIX, - 'filter': 'objectclass=top'}))) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to add task: error ' + e.message['desc']) - assert False - - wait_for_task(inst, TASK_DN) - - # Check for "memberOf" - try: - entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') - if not entries: - log.fatal('test_memberof: user1 missing memberOf attr') - assert False - except ldap.LDAPError as e: - log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_MEMBER_OF) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete test entry1: ' + e.message['desc']) - assert False - - try: - inst.delete_s(GROUP_DN) - except ldap.LDAPError as e: - log.fatal('test_memberof: 
Failed to delete test group: ' + e.message['desc']) - assert False - - try: - inst.delete_s(SHARED_CONFIG_DN) - except ldap.LDAPError as e: - log.fatal('test_memberof: Failed to delete shared config entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_memberof: PASS\n') - - return - - -################################################################################ -# -# Test Managed Entry Plugin (6) -# -################################################################################ -def test_mep(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_MANAGED_ENTRY) - inst.plugins.enable(name=PLUGIN_MANAGED_ENTRY) - - if args == "restart": - return - - USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX - MEP_USER_DN = 'cn=user1,ou=groups,' + DEFAULT_SUFFIX - USER_DN2 = 'uid=user 1,ou=people,' + DEFAULT_SUFFIX - MEP_USER_DN2 = 'uid=user 1,ou=groups,' + DEFAULT_SUFFIX - CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' - TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX - TEMPLATE_DN2 = 'cn=MEP Template2,' + DEFAULT_SUFFIX - - log.info('Testing ' + PLUGIN_MANAGED_ENTRY + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - # Add our org units - try: - inst.add_s(Entry((PEOPLE_OU, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'people'}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((GROUP_OU, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'people'}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) - assert False - - # Add the template entry - try: - inst.add_s(Entry((TEMPLATE_DN, { - 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), - 'cn': 'MEP Template', - 'mepRDNAttr': 'cn', - 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), - 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc']) - assert False - - # Add the config entry - try: - inst.add_s(Entry((CONFIG_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'config', - 'originScope': PEOPLE_OU, - 'originFilter': 'objectclass=posixAccount', - 'managedBase': GROUP_OU, - 'managedTemplate': TEMPLATE_DN - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add an entry that meets the MEP scope - try: - inst.add_s(Entry((USER_DN, { - 'objectclass': 'top posixAccount extensibleObject'.split(), - 'uid': 'user1', - 'cn': 'user1', - 'uidNumber': '1', - 'gidNumber': '1', - 'homeDirectory': '/home/user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to user1: error ' + e.message['desc']) - assert False - - # Check if a managed group entry 
was created - try: - inst.search_s(MEP_USER_DN, ldap.SCOPE_BASE, '(objectclass=top)') - except ldap.LDAPError as e: - log.fatal('test_mep: Unable to find MEP entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Change the config - ############################################################################ - - # Add a new template entry - try: - inst.add_s(Entry((TEMPLATE_DN2, { - 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), - 'cn': 'MEP Template2', - 'mepRDNAttr': 'uid', - 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), - 'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|') - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add template entry2: error ' + e.message['desc']) - assert False - - # Set the new template dn - try: - inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'managedTemplate', TEMPLATE_DN2)]) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to set mep plugin config: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add an entry that meets the MEP scope - try: - inst.add_s(Entry((USER_DN2, { - 'objectclass': 'top posixAccount extensibleObject'.split(), - 'uid': 'user 1', - 'cn': 'user 1', - 'uidNumber': '1', - 'gidNumber': '1', - 'homeDirectory': '/home/user2' - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to user2: error ' + e.message['desc']) - assert False - - # Check if a managed group entry was created - try: - inst.search_s(MEP_USER_DN2, ldap.SCOPE_BASE, '(objectclass=top)') - except ldap.LDAPError as e: - log.fatal('test_mep: Unable to find MEP entry2: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_MANAGED_ENTRY) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(USER_DN) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to delete test user1: ' + e.message['desc']) - assert False - - try: - inst.delete_s(USER_DN2) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to delete test user 2: ' + e.message['desc']) - assert False - - try: - inst.delete_s(TEMPLATE_DN) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to delete template1: ' + e.message['desc']) - assert False - - inst.plugins.disable(name=PLUGIN_MANAGED_ENTRY) - - try: - inst.delete_s(TEMPLATE_DN2) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to delete template2: ' + e.message['desc']) - assert False - - try: - inst.delete_s(CONFIG_DN) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to delete config: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_mep: PASS\n') - return - - -################################################################################ -# -# Test Passthru Plugin (7) -# 
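# A note on the configuration exercised below (illustrative, based only on the
# values this test sets): the Pass Through Authentication plugin takes an LDAP
# URL in nsslapd-pluginarg0, and simple binds whose target DN falls under that
# URL's subtree are forwarded to the remote server rather than being resolved
# against a local backend.  For example:
#
#     nsslapd-pluginenabled: on
#     nsslapd-pluginarg0: ldap://127.0.0.1:33333/dc=pass,dc=thru
#
# With this in place, a bind as uid=admin,dc=pass,dc=thru is authenticated by
# the instance listening on port 33333.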
-################################################################################ -def test_passthru(inst, args=None): - # Passthru is a bit picky about the state of the entry - we can't just restart it - if args == "restart": - return - - # stop the plugin - inst.plugins.disable(name=PLUGIN_PASSTHRU) - - PLUGIN_DN = 'cn=' + PLUGIN_PASSTHRU + ',cn=plugins,cn=config' - PASSTHRU_DN = 'uid=admin,dc=pass,dc=thru' - PASSTHRU_DN2 = 'uid=admin2,dc=pass2,dc=thru' - PASS_SUFFIX1 = 'dc=pass,dc=thru' - PASS_SUFFIX2 = 'dc=pass2,dc=thru' - PASS_BE2 = 'PASS2' - - log.info('Testing ' + PLUGIN_PASSTHRU + '...') - - ############################################################################ - # Add a new "remote" instance, and a user for auth - ############################################################################ - - # Create second instance - passthru_inst = DirSrv(verbose=False) - - # Args for the instance - args_instance[SER_HOST] = LOCALHOST - args_instance[SER_PORT] = 33333 - args_instance[SER_SERVERID_PROP] = 'passthru' - args_instance[SER_CREATION_SUFFIX] = PASS_SUFFIX1 - args_passthru_inst = args_instance.copy() - passthru_inst.allocate(args_passthru_inst) - passthru_inst.create() - passthru_inst.open() - - # Create a second backend - passthru_inst.backend.create(PASS_SUFFIX2, {BACKEND_NAME: PASS_BE2}) - passthru_inst.mappingtree.create(PASS_SUFFIX2, bename=PASS_BE2) - - # Create the top of the tree - try: - passthru_inst.add_s(Entry((PASS_SUFFIX2, { - 'objectclass': 'top domain'.split(), - 'dc': 'pass2'}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.fatal('test_passthru: Failed to create suffix entry: error ' + e.message['desc']) - passthru_inst.delete() - assert False - - # Add user to suffix1 - try: - passthru_inst.add_s(Entry((PASSTHRU_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'admin', - 'userpassword': 'password' - }))) - except ldap.LDAPError as e: - log.fatal('test_passthru: Failed to admin1: error ' + e.message['desc']) - passthru_inst.delete() - assert False - - # Add user to suffix 2 - try: - passthru_inst.add_s(Entry((PASSTHRU_DN2, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'admin2', - 'userpassword': 'password' - }))) - except ldap.LDAPError as e: - log.fatal('test_passthru: Failed to admin2 : error ' + e.message['desc']) - passthru_inst.delete() - assert False - - ############################################################################ - # Configure and start plugin - ############################################################################ - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'on'), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'ldap://127.0.0.1:33333/dc=pass,dc=thru')]) - except ldap.LDAPError as e: - log.fatal('test_passthru: Failed to set mep plugin config: error ' + e.message['desc']) - passthru_inst.delete() - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # login as user - try: - inst.simple_bind_s(PASSTHRU_DN, "password") - except ldap.LDAPError as e: - log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) - passthru_inst.delete() - assert False - - ############################################################################ - # Change the config - ############################################################################ - - # login as root DN - try: - inst.simple_bind_s(DN_DM, PASSWORD) 
- except ldap.LDAPError as e: - log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) - passthru_inst.delete() - assert False - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'ldap://127.0.0.1:33333/dc=pass2,dc=thru')]) - except ldap.LDAPError as e: - log.fatal('test_passthru: Failed to set mep plugin config: error ' + e.message['desc']) - passthru_inst.delete() - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # login as user - try: - inst.simple_bind_s(PASSTHRU_DN2, "password") - except ldap.LDAPError as e: - log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) - passthru_inst.delete() - assert False - - # login as root DN - try: - inst.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) - passthru_inst.delete() - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_PASSTHRU) - - ############################################################################ - # Cleanup - ############################################################################ - - # remove the passthru instance - passthru_inst.delete() - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_passthru: PASS\n') - - return - - -################################################################################ -# -# Test Referential Integrity Plugin (8) -# -################################################################################ -def test_referint(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - inst.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - - if args == "restart": - return - - log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...') - PLUGIN_DN = 'cn=' + PLUGIN_REFER_INTEGRITY + ',cn=plugins,cn=config' - SHARED_CONFIG_DN = 'cn=RI Config,' + DEFAULT_SUFFIX - - ############################################################################ - # Configure plugin - ############################################################################ - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to configure RI plugin: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Add some users and a group - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add user1: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((USER2_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user2' - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add user2: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((GROUP_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'group', - 'member': 
USER1_DN, - 'uniquemember': USER2_DN - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add group: error ' + e.message['desc']) - assert False - - # Grab the referint log file from the plugin - - try: - entries = inst.search_s(PLUGIN_DN, ldap.SCOPE_BASE, '(objectclass=top)') - REFERINT_LOGFILE = entries[0].getValue('referint-logfile') - except ldap.LDAPError as e: - log.fatal('test_referint: Unable to search plugin entry: ' + e.message['desc']) - assert False - - # Add shared config entry - try: - inst.add_s(Entry((SHARED_CONFIG_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'referint-membership-attr': 'member', - 'referint-update-delay': '0', - 'referint-logfile': REFERINT_LOGFILE, - 'referint-logchanges': '0' - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to shared config entry: error ' + e.message['desc']) - assert False - - # Delete a user - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) - assert False - - # Check for integrity - try: - entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')') - if entry: - log.fatal('test_referint: user1 was not removed from group') - assert False - except ldap.LDAPError as e: - log.fatal('test_referint: Unable to search group: ' + e.message['desc']) - assert False - - ############################################################################ - # Change the config - ############################################################################ - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to configure RI plugin: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Delete a user - try: - inst.delete_s(USER2_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) - assert False - - # Check for integrity - try: - entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')') - if entry: - log.fatal('test_referint: user2 was not removed from group') - assert False - except ldap.LDAPError as e: - log.fatal('test_referint: Unable to search group: ' + e.message['desc']) - assert False - - ############################################################################ - # Set the shared config entry and test the plugin - ############################################################################ - - # The shared config entry uses "member" - the above test used "uniquemember" - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to set plugin area: error ' + e.message['desc']) - assert False - - # Delete the group, and readd everything - try: - inst.delete_s(GROUP_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete group: ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add user1: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((USER2_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user2' - }))) - except 
ldap.LDAPError as e: - log.fatal('test_referint: Failed to add user2: error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((GROUP_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'group', - 'member': USER1_DN, - 'uniquemember': USER2_DN - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add group: error ' + e.message['desc']) - assert False - - # Delete a user - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) - assert False - - # Check for integrity - try: - entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')') - if entry: - log.fatal('test_referint: user1 was not removed from group') - assert False - except ldap.LDAPError as e: - log.fatal('test_referint: Unable to search group: ' + e.message['desc']) - assert False - - ############################################################################ - # Change the shared config entry to use 'uniquemember' and test the plugin - ############################################################################ - - try: - inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to set shared plugin entry(uniquemember): error ' - + e.message['desc']) - assert False - - # Delete a user - try: - inst.delete_s(USER2_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) - assert False - - # Check for integrity - try: - entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')') - if entry: - log.fatal('test_referint: user2 was not removed from group') - assert False - except ldap.LDAPError as e: - log.fatal('test_referint: Unable to search group: ' + e.message['desc']) - assert False - - ############################################################################ - # Remove shared config from plugin, and retest - ############################################################################ - - # First change the plugin to use member before we move the shared config that uses uniquemember - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to update config(uniquemember): error ' + e.message['desc']) - assert False - - # Remove shared config from plugin - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - # Add test user - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add user1: error ' + e.message['desc']) - assert False - - # Add user to group - try: - inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)]) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to add uniquemember: error ' + e.message['desc']) - assert False - - # Delete a user - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) - assert False - - # Check for integrity - try: - entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')') - if entry: - log.fatal('test_referint: user1 was not removed from group') - assert False - except 
ldap.LDAPError as e: - log.fatal('test_referint: Unable to search group: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_REFER_INTEGRITY) - - ############################################################################ - # Cleanup - ############################################################################ - - try: - inst.delete_s(GROUP_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete group: ' + e.message['desc']) - assert False - - try: - inst.delete_s(SHARED_CONFIG_DN) - except ldap.LDAPError as e: - log.fatal('test_referint: Failed to delete shared config entry: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_referint: PASS\n') - - return - - -################################################################################ -# -# Test Retro Changelog Plugin (9) -# -################################################################################ -def test_retrocl(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) - inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - - if args == "restart": - return - - log.info('Testing ' + PLUGIN_RETRO_CHANGELOG + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - # Gather the current change count (it's not 1 once we start the stabilty tests) - try: - entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') - except ldap.LDAPError as e: - log.fatal('test_retrocl: Failed to get the count: error ' + e.message['desc']) - assert False - - entry_count = len(entry) - - ############################################################################ - # Test plugin - ############################################################################ - - # Add a user - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('test_retrocl: Failed to add user1: error ' + e.message['desc']) - assert False - - # Check we logged this in the retro cl - try: - entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') - if not entry or len(entry) == entry_count: - log.fatal('test_retrocl: changelog not updated') - assert False - except ldap.LDAPError as e: - log.fatal('test_retrocl: Unable to search group: ' + e.message['desc']) - assert False - - entry_count += 1 - - ############################################################################ - # Change the config - disable plugin - ############################################################################ - - inst.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) - - ############################################################################ - # Test plugin - ############################################################################ - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_retrocl: Failed to delete user1: ' + e.message['desc']) - assert False - - # Check we didn't logged this in the retro cl - try: - entry = inst.search_s(RETROCL_SUFFIX, 
ldap.SCOPE_SUBTREE, '(changenumber=*)') - if len(entry) != entry_count: - log.fatal('test_retrocl: changelog incorrectly updated - change count: ' - + str(len(entry)) + ' - expected 1') - assert False - except ldap.LDAPError as e: - log.fatal('test_retrocl: Unable to search retro changelog: ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - test_dependency(inst, PLUGIN_RETRO_CHANGELOG) - - ############################################################################ - # Cleanup - ############################################################################ - - # None - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_retrocl: PASS\n') - - return - - -################################################################################ -# -# Test Root DN Access Control Plugin (10) -# -################################################################################ -def test_rootdn(inst, args=None): - # stop the plugin, and start it - inst.plugins.disable(name=PLUGIN_ROOTDN_ACCESS) - inst.plugins.enable(name=PLUGIN_ROOTDN_ACCESS) - - if args == "restart": - return - - PLUGIN_DN = 'cn=' + PLUGIN_ROOTDN_ACCESS + ',cn=plugins,cn=config' - - log.info('Testing ' + PLUGIN_ROOTDN_ACCESS + '...') - - ############################################################################ - # Configure plugin - ############################################################################ - - # Add an user and aci to open up cn=config - try: - inst.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1', - 'userpassword': 'password' - }))) - except ldap.LDAPError as e: - log.fatal('test_rootdn: Failed to add user1: error ' + e.message['desc']) - assert False - - # Set an aci so we can modify the plugin after ew deny the root dn - ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl ' + - '"all access";allow (all)(userdn="ldap:///anyone");)') - try: - inst.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn: Failed to add aci to config: error ' + e.message['desc']) - assert False - - # Set allowed IP to an unknown host - blocks root dn - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '10.10.10.10')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Bind as Root DN - failed = False - try: - inst.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - failed = True - - if not failed: - log.fatal('test_rootdn: Root DN was incorrectly able to bind') - assert False - - ############################################################################ - # Change the config - ############################################################################ - - # Bind as the user who can make updates to the config - try: - inst.simple_bind_s(USER1_DN, 'password') - except ldap.LDAPError as e: - log.fatal('test_rootdn: failed to bind as user1') - assert False - - # First, test 
that invalid plugin changes are rejected - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.ZZZ.12')]) - log.fatal('test_rootdn: Incorrectly allowed to add invalid "rootdn-deny-ip: 12.12.ZZZ.12"') - assert False - except ldap.LDAPError: - pass - - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) - log.fatal('test_rootdn: Incorrectly allowed to add invalid "rootdn-allow-host: host._.com"') - assert False - except ldap.LDAPError: - pass - - # Remove the restriction - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc']) - assert False - - ############################################################################ - # Test plugin - ############################################################################ - - # Bind as Root DN - failed = False - try: - inst.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - failed = True - - if failed: - log.fatal('test_rootdn: Root DN was not able to bind') - assert False - - ############################################################################ - # Test plugin dependency - ############################################################################ - - test_dependency(inst, PLUGIN_ROOTDN_ACCESS) - - ############################################################################ - # Cleanup - remove ACI from cn=config and test user - ############################################################################ - - try: - inst.modify_s(DN_CONFIG, [(ldap.MOD_DELETE, 'aci', ACI)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn: Failed to add aci to config: error ' + e.message['desc']) - assert False - - try: - inst.delete_s(USER1_DN) - except ldap.LDAPError as e: - log.fatal('test_rootdn: Failed to delete user1: ' + e.message['desc']) - assert False - - ############################################################################ - # Test passed - ############################################################################ - - log.info('test_rootdn: PASS\n') - - return - - -# Array of test functions -func_tests = [test_acctpolicy, test_attruniq, test_automember, test_dna, - test_linkedattrs, test_memberof, test_mep, test_passthru, - test_referint, test_retrocl, test_rootdn] - - -def test_all_plugins(inst, args=None): - for func in func_tests: - func(inst, args) - - return - diff --git a/dirsrvtests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/suites/dynamic-plugins/stress_tests.py deleted file mode 100644 index 920d3f6..0000000 --- a/dirsrvtests/suites/dynamic-plugins/stress_tests.py +++ /dev/null @@ -1,146 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -''' -Created on Dec 16, 2014 - -@author: mreynolds -''' -import os -import sys -import time -import ldap -import logging -import pytest -import threading -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -NUM_USERS = 250 -GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX - - -def openConnection(inst): - # Open a new connection to our LDAP server - server = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - server.allocate(args_standalone) - server.open() - - return server - - -# Configure Referential Integrity Plugin for stress test -def configureRI(inst): - inst.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - PLUGIN_DN = 'cn=' + PLUGIN_REFER_INTEGRITY + ',cn=plugins,cn=config' - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('configureRI: Failed to configure RI plugin: error ' + e.message['desc']) - assert False - - -# Configure MemberOf Plugin for stress test -def configureMO(inst): - inst.plugins.enable(name=PLUGIN_MEMBER_OF) - PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' - try: - inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('configureMO: Failed to update config(uniquemember): error ' + e.message['desc']) - assert False - - -def cleanup(conn): - try: - conn.delete_s(GROUP_DN) - except ldap.LDAPError as e: - log.fatal('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc']) - assert False - - -class DelUsers(threading.Thread): - def __init__(self, inst, rdnval): - threading.Thread.__init__(self) - self.daemon = True - self.inst = inst - self.rdnval = rdnval - - def run(self): - conn = openConnection(self.inst) - idx = 0 - log.info('DelUsers - Deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') - while idx < NUM_USERS: - USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX - try: - conn.delete_s(USER_DN) - except ldap.LDAPError as e: - log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc']) - assert False - - idx += 1 - - conn.close() - log.info('DelUsers - Finished deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') - - -class AddUsers(threading.Thread): - def __init__(self, inst, rdnval, addToGroup): - threading.Thread.__init__(self) - self.daemon = True - self.inst = inst - self.addToGroup = addToGroup - self.rdnval = rdnval - - def run(self): - # Start adding users - conn = openConnection(self.inst) - idx = 0 - - if self.addToGroup: - try: - conn.add_s(Entry((GROUP_DN, - {'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), - 'uid': 'user' + str(idx)}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc']) - assert False - - log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') - - while idx < NUM_USERS: - USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX - try: - conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), - 'uid': 'user' + str(idx)}))) - except ldap.LDAPError as e: - 
log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) - assert False - - if self.addToGroup: - # Add the user to the group - try: - conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', USER_DN)]) - except ldap.LDAPError as e: - log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc']) - assert False - - idx += 1 - - conn.close() - log.info('AddUsers - Finished adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') diff --git a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py deleted file mode 100644 index c05c402..0000000 --- a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py +++ /dev/null @@ -1,493 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Dec 09, 2014 - -@author: mreynolds -''' -import os -import sys -import time -import ldap -import ldap.sasl -import logging -import pytest -import plugin_tests -import stress_tests -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -def repl_fail(replica): - # remove replica instance, and assert failure - replica.delete() - assert False - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_dynamic_plugins(topology): - """ - Test Dynamic Plugins - exercise each plugin and its main features, while - changing the configuration without restarting the server. - - Need to test: functionality, stability, and stress. These tests need to run - with replication disabled, and with replication setup with a - second instance. Then test if replication is working, and we have - same entries on each side. - - Functionality - Make sure that as configuration changes are made they take - effect immediately. Cross plugin interaction (e.g. automember/memberOf) - needs to tested, as well as plugin tasks. Need to test plugin - config validation(dependencies, etc). - - Memory Corruption - Restart the plugins many times, and in different orders and test - functionality, and stability. This will excerise the internal - plugin linked lists, dse callbacks, and task handlers. 
- - Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc) - Restart various plugins while these operations are going on. Perform this test - 5 times(stress_max_run). - - """ - - REPLICA_PORT = 33334 - RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))' - master_maxcsn = 0 - replica_maxcsn = 0 - msg = ' (no replication)' - replication_run = False - stress_max_runs = 5 - - # First enable dynamic plugins - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc']) - assert False - - # Test that critical plugins can be updated even though the change might not be applied - try: - topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')]) - except ldap.LDAPError as e: - ldap.fatal('Failed to apply change to critical plugin' + e.message['desc']) - assert False - - while 1: - # - # First run the tests with replication disabled, then rerun them with replication set up - # - - ############################################################################ - # Test plugin functionality - ############################################################################ - - log.info('####################################################################') - log.info('Testing Dynamic Plugins Functionality' + msg + '...') - log.info('####################################################################\n') - - plugin_tests.test_all_plugins(topology.standalone) - - log.info('####################################################################') - log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.') - log.info('####################################################################\n') - - ############################################################################ - # Test the stability by exercising the internal lists, callabcks, and task handlers - ############################################################################ - - log.info('####################################################################') - log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...') - log.info('####################################################################\n') - prev_plugin_test = None - prev_prev_plugin_test = None - - for plugin_test in plugin_tests.func_tests: - # - # Restart the plugin several times (and prev plugins) - work that linked list - # - plugin_test(topology.standalone, "restart") - - if prev_prev_plugin_test: - prev_prev_plugin_test(topology.standalone, "restart") - - plugin_test(topology.standalone, "restart") - - if prev_plugin_test: - prev_plugin_test(topology.standalone, "restart") - - plugin_test(topology.standalone, "restart") - - # Now run the functional test - plugin_test(topology.standalone) - - # Set the previous tests - if prev_plugin_test: - prev_prev_plugin_test = prev_plugin_test - prev_plugin_test = plugin_test - - log.info('####################################################################') - log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.') - log.info('####################################################################\n') - - ############################################################################ - # Stress two plugins while restarting it, and while restarting other plugins. - # The goal is to not crash, and have the plugins work after stressing them. 
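        # The long runs of plugin disable()/enable() calls below simply bounce
        # plugins while the add/delete threads are working.  As a sketch of the
        # same toggling pattern (a hypothetical helper, not one defined in this
        # suite), the idea reduces to:

        def toggle_plugin(inst, plugin_name, cycles=3, pause=1):
            # Repeatedly disable and re-enable a plugin; the server is expected
            # to stay up and keep applying the concurrent updates throughout.
            for _ in range(cycles):
                inst.plugins.disable(name=plugin_name)
                time.sleep(pause)
                inst.plugins.enable(name=plugin_name)
                time.sleep(pause)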
- ############################################################################ - - log.info('####################################################################') - log.info('Stressing Dynamic Plugins' + msg + '...') - log.info('####################################################################\n') - - stress_tests.configureMO(topology.standalone) - stress_tests.configureRI(topology.standalone) - - stress_count = 0 - while stress_count < stress_max_runs: - log.info('####################################################################') - log.info('Running stress test' + msg + '. Run (%d/%d)...' % (stress_count + 1, stress_max_runs)) - log.info('####################################################################\n') - - try: - # Launch three new threads to add a bunch of users - add_users = stress_tests.AddUsers(topology.standalone, 'employee', True) - add_users.start() - add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True) - add_users2.start() - add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True) - add_users3.start() - time.sleep(1) - - # While we are adding users restart the MO plugin and an idle plugin - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) - topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) - time.sleep(1) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - time.sleep(2) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) - topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - - # Wait for the 'adding' threads to complete - add_users.join() - add_users2.join() - add_users3.join() - - # Now launch three threads to delete the users - del_users = stress_tests.DelUsers(topology.standalone, 'employee') - del_users.start() - del_users2 = stress_tests.DelUsers(topology.standalone, 'entry') - del_users2.start() - del_users3 = stress_tests.DelUsers(topology.standalone, 'person') - del_users3.start() - time.sleep(1) - - # Restart both the MO, RI plugins during these deletes, and an idle plugin - topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - time.sleep(1) - topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - time.sleep(1) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) - topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) - topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - 
topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - time.sleep(2) - topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - time.sleep(1) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - time.sleep(1) - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) - topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) - - # Wait for the 'deleting' threads to complete - del_users.join() - del_users2.join() - del_users3.join() - - # Now make sure both the MO and RI plugins still work correctly - plugin_tests.func_tests[8](topology.standalone) # RI plugin - plugin_tests.func_tests[5](topology.standalone) # MO plugin - - # Cleanup the stress tests - stress_tests.cleanup(topology.standalone) - - except: - log.info('Stress test failed!') - repl_fail(replica_inst) - - stress_count += 1 - log.info('####################################################################') - log.info('Successfully Stressed Dynamic Plugins' + msg + - '. Completed (%d/%d)' % (stress_count, stress_max_runs)) - log.info('####################################################################\n') - - if replication_run: - # We're done. - break - else: - # - # Enable replication and run everything one more time - # - log.info('Setting up replication, and rerunning the tests...\n') - - # Create replica instance - replica_inst = DirSrv(verbose=False) - args_instance[SER_HOST] = LOCALHOST - args_instance[SER_PORT] = REPLICA_PORT - args_instance[SER_SERVERID_PROP] = 'replica' - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - - args_replica_inst = args_instance.copy() - replica_inst.allocate(args_replica_inst) - replica_inst.create() - replica_inst.open() - - try: - topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, - role=REPLICAROLE_MASTER, - replicaId=1) - replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX, - role=REPLICAROLE_CONSUMER, - replicaId=65535) - properties = {RA_NAME: r'to_replica', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - - repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, - host=LOCALHOST, - port=REPLICA_PORT, - properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - repl_fail(replica_inst) - - topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT) - topology.standalone.waitForReplInit(repl_agreement) - except: - log.info('Failed to setup replication!') - repl_fail(replica_inst) - - replication_run = True - msg = ' (replication enabled)' - time.sleep(1) - - ############################################################################ - # Check replication, and data are in sync, and remove the instance - ############################################################################ - - log.info('Checking if replication is in sync...') - - try: - # Grab master's max CSN - entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER) - if not entry: - log.error('Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for 
ruv in elements: - if 'replica 1' in ruv: - parts = ruv.split() - if len(parts) == 5: - master_maxcsn = parts[4] - break - else: - log.error('RUV is incomplete') - repl_fail(replica_inst) - if master_maxcsn == 0: - log.error('Failed to find maxcsn on master') - repl_fail(replica_inst) - - except ldap.LDAPError as e: - log.fatal('Unable to search masterfor db tombstone: ' + e.message['desc']) - repl_fail(replica_inst) - - # Loop on the consumer - waiting for it to catch up - count = 0 - insync = False - while count < 10: - try: - # Grab master's max CSN - entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER) - if not entry: - log.error('Failed to find db tombstone entry on consumer') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 1' in ruv: - parts = ruv.split() - if len(parts) == 5: - replica_maxcsn = parts[4] - break - if replica_maxcsn == 0: - log.error('Failed to find maxcsn on consumer') - repl_fail(replica_inst) - except ldap.LDAPError as e: - log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc']) - repl_fail(replica_inst) - - if master_maxcsn == replica_maxcsn: - insync = True - log.info('Replication is in sync.\n') - break - count += 1 - time.sleep(1) - - # Report on replication status - if not insync: - log.error('Consumer not in sync with master!') - repl_fail(replica_inst) - - # - # Verify the databases are identical. There should not be any "user, entry, employee" entries - # - log.info('Checking if the data is the same between the replicas...') - - # Check the master - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - "(|(uid=person*)(uid=entry*)(uid=employee*))") - if len(entries) > 0: - log.error('Master database has incorrect data set!\n') - repl_fail(replica_inst) - except ldap.LDAPError as e: - log.fatal('Unable to search db on master: ' + e.message['desc']) - repl_fail(replica_inst) - - # Check the consumer - try: - entries = replica_inst.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - "(|(uid=person*)(uid=entry*)(uid=employee*))") - if len(entries) > 0: - log.error('Consumer database in not consistent with master database') - repl_fail(replica_inst) - except ldap.LDAPError as e: - log.fatal('Unable to search db on consumer: ' + e.message['desc']) - repl_fail(replica_inst) - - log.info('Data is consistent across the replicas.\n') - - log.info('####################################################################') - log.info('Replication consistency test passed') - log.info('####################################################################\n') - - # Remove the replica instance - replica_inst.delete() - - ############################################################################ - # We made it to the end! - ############################################################################ - - log.info('#####################################################') - log.info('#####################################################') - log.info("Dynamic Plugins Testsuite: Completed Successfully!") - log.info('#####################################################') - log.info('#####################################################\n') - - -def test_dynamic_plugins_final(topology): - topology.standalone.delete() - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_dynamic_plugins(topo) - test_dynamic_plugins_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/suites/filter/filter_test.py b/dirsrvtests/suites/filter/filter_test.py deleted file mode 100644 index d212f6a..0000000 --- a/dirsrvtests/suites/filter/filter_test.py +++ /dev/null @@ -1,152 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_filter_init(topology): - ''' - Write your testcase here... - ''' - return - - -def test_filter_escaped(topology): - ''' - Test we can search for an '*' in a attribute value. 
- ''' - - log.info('Running test_filter_escaped...') - - USER1_DN = 'uid=test_entry,' + DEFAULT_SUFFIX - USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX - - try: - topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'test * me', - 'uid': 'test_entry', - 'userpassword': PASSWORD}))) - except ldap.LDAPError as e: - log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' + - e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'test me', - 'uid': 'test_entry2', - 'userpassword': PASSWORD}))) - except ldap.LDAPError as e: - log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc']) - assert False - - try: - entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\**') - if not entry or len(entry) > 1: - log.fatal('test_filter_escaped: Entry was not found using "cn=*\**"') - assert False - except ldap.LDAPError as e: - log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' % - (USER1_DN, e.message['desc'])) - assert False - - log.info('test_filter_escaped: PASSED') - - -def test_filter_search_original_attrs(topology): - ''' - Search and request attributes with extra characters. The returned entry - should not have these extra characters: "objectclass EXTRA" - ''' - - log.info('Running test_filter_search_original_attrs...') - - try: - entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, - 'objectclass=top', ['objectclass-EXTRA']) - if entry[0].hasAttr('objectclass-EXTRA'): - log.fatal('test_filter_search_original_attrs: Entry incorrectly contains the "objectclass-EXTRA" attribute') - assert False - except ldap.LDAPError as e: - log.fatal('test_filter_search_original_attrs: Failed to search suffix(%s), error: %s' % - (DEFAULT_SUFFIX, e.message['desc'])) - assert False - - log.info('test_filter_search_original_attrs: PASSED') - - -def test_filter_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - - test_filter_init(topo) - test_filter_escaped(topo) - test_filter_search_original_attrs(topo) - - test_filter_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/get_effective_rights/ger_test.py b/dirsrvtests/suites/get_effective_rights/ger_test.py deleted file mode 100644 index f87d0a1..0000000 --- a/dirsrvtests/suites/get_effective_rights/ger_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details.
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ger_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_ger_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_ger_final(topology): - topology.standalone.delete() - log.info('ger test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ger_init(topo) - test_ger_(topo) - test_ger_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/ldapi/ldapi_test.py b/dirsrvtests/suites/ldapi/ldapi_test.py deleted file mode 100644 index 06589bd..0000000 --- a/dirsrvtests/suites/ldapi/ldapi_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ldapi_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_ldapi_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_ldapi_final(topology): - topology.standalone.delete() - log.info('ldapi test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ldapi_init(topo) - test_ldapi_(topo) - test_ldapi_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/linkedattrs_plugin/linked_attrs_test.py b/dirsrvtests/suites/linkedattrs_plugin/linked_attrs_test.py deleted file mode 100644 index d61898f..0000000 --- a/dirsrvtests/suites/linkedattrs_plugin/linked_attrs_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_linked_attrs_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_linked_attrs_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_linked_attrs_final(topology): - topology.standalone.delete() - log.info('linked_attrs test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_linked_attrs_init(topo) - test_linked_attrs_(topo) - test_linked_attrs_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/mapping_tree/mapping_tree_test.py b/dirsrvtests/suites/mapping_tree/mapping_tree_test.py deleted file mode 100644 index 6cc95e4..0000000 --- a/dirsrvtests/suites/mapping_tree/mapping_tree_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_mapping_tree_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_mapping_tree_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_mapping_tree_final(topology): - topology.standalone.delete() - log.info('mapping_tree test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_mapping_tree_init(topo) - test_mapping_tree_(topo) - test_mapping_tree_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/memberof_plugin/memberof_test.py b/dirsrvtests/suites/memberof_plugin/memberof_test.py deleted file mode 100644 index e97c09a..0000000 --- a/dirsrvtests/suites/memberof_plugin/memberof_test.py +++ /dev/null @@ -1,176 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# - -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) -installation1_prefix = None - -MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX -USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX -GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Delete each instance in the end - def fin(): - standalone.delete() - #pass - request.addfinalizer(fin) - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_memberof_auto_add_oc(topology): - """ - Test the auto add objectclass feature. The plugin should add a predefined - objectclass that will allow memberOf to be added to an entry. - """ - - # enable dynamic plugins - try: - topology.standalone.modify_s(DN_CONFIG, - [(ldap.MOD_REPLACE, - 'nsslapd-dynamic-plugins', - 'on')]) - except ldap.LDAPError as e:
- log.error('Failed to enable dynamic plugins! ' + e.message['desc']) - assert False - - # Enable the plugin - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - - # First test invalid value (config validation) - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - try: - topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, - [(ldap.MOD_REPLACE, - 'memberofAutoAddOC', - 'invalid123')]) - log.fatal('Incorrectly added invalid objectclass!') - assert False - except ldap.UNWILLING_TO_PERFORM: - log.info('Correctly rejected invalid objectclass') - except ldap.LDAPError as e: - log.error('Unexpected error adding invalid objectclass - error: ' + e.message['desc']) - assert False - - # Add valid objectclass - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - try: - topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, - [(ldap.MOD_REPLACE, - 'memberofAutoAddOC', - 'inetuser')]) - except ldap.LDAPError as e: - log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) - assert False - - # Add two users - try: - topology.standalone.add_s(Entry((USER1_DN, - {'objectclass': ['top', - 'person', - 'organizationalPerson', - 'inetorgperson'], - 'sn': 'last', - 'cn': 'full', - 'givenname': 'user1', - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER2_DN, - {'objectclass': ['top', - 'person', - 'organizationalPerson', - 'inetorgperson'], - 'sn': 'last', - 'cn': 'full', - 'givenname': 'user2', - 'uid': 'user2' - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add user2 entry, error: ' + e.message['desc']) - assert False - - # Add a group (that already includes one user) - try: - topology.standalone.add_s(Entry((GROUP_DN, - {'objectclass': ['top', - 'groupOfNames'], - 'cn': 'group', - 'member': USER1_DN - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add group entry, error: ' + e.message['desc']) - assert False - - # Add a user to the group - try: - topology.standalone.modify_s(GROUP_DN, - [(ldap.MOD_ADD, - 'member', - USER2_DN)]) - except ldap.LDAPError as e: - log.fatal('Failed to add user2 to group: error ' + e.message['desc']) - assert False - - log.info('Test complete.') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/suites/memory_leaks/range_search_test.py b/dirsrvtests/suites/memory_leaks/range_search_test.py deleted file mode 100644 index 12599c0..0000000 --- a/dirsrvtests/suites/memory_leaks/range_search_test.py +++ /dev/null @@ -1,138 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details.
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - def fin(): - standalone.delete() - sbin_dir = get_sbin_dir(prefix=standalone.prefix) - valgrind_disable(sbin_dir) - request.addfinalizer(fin) - - return TopologyStandalone(standalone) - - -def test_range_search_init(topology): - ''' - Enable retro cl, and valgrind. Since valgrind tests move the ns-slapd binary - around it's important to always "valgrind_disable" before "assert False"ing, - otherwise we leave the wrong ns-slapd in place if there is a failure - ''' - - log.info('Initializing test_range_search...') - - topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - - # First stop the instance - topology.standalone.stop(timeout=10) - - # Get the sbin directory so we know where to replace 'ns-slapd' - sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix) - - # Enable valgrind - valgrind_enable(sbin_dir) - - # Now start the server with a longer timeout - topology.standalone.start(timeout=60) - - -def test_range_search(topology): - ''' - Add a 100 entries, and run a range search. 
When we encounter an error we - still need to disable valgrind before exiting - ''' - - log.info('Running test_range_search...') - - success = True - - # Add 100 test entries - for idx in range(1, 100): - idx = str(idx) - USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX - try: - topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), - 'uid': 'user' + idx}))) - except ldap.LDAPError as e: - log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc']) - success = False - time.sleep(1) - - if success: - # Issue range search - try: - topology.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, - '(&(changenumber>=74)(changenumber<=84))') - except ldap.LDAPError as e: - log.fatal('test_range_search: Failed to search retro changelog(%s), error: %s' % - (RETROCL_SUFFIX, e.message('desc'))) - success = False - - if success: - # Get the results file, stop the server, and check for the leak - results_file = valgrind_get_results_file(topology.standalone) - topology.standalone.stop(timeout=30) - if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'range_candidates'): - log.fatal('test_range_search: Memory leak is still present!') - assert False - - log.info('test_range_search: PASSED') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) - diff --git a/dirsrvtests/suites/mep_plugin/mep_test.py b/dirsrvtests/suites/mep_plugin/mep_test.py deleted file mode 100644 index 2bda08d..0000000 --- a/dirsrvtests/suites/mep_plugin/mep_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_mep_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_mep_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_mep_final(topology): - topology.standalone.delete() - log.info('mep test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_mep_init(topo) - test_mep_(topo) - test_mep_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/monitor/monitor_test.py b/dirsrvtests/suites/monitor/monitor_test.py deleted file mode 100644 index d24b3a5..0000000 --- a/dirsrvtests/suites/monitor/monitor_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_monitor_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_monitor_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_monitor_final(topology): - topology.standalone.delete() - log.info('monitor test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_monitor_init(topo) - test_monitor_(topo) - test_monitor_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/paged_results/paged_results_test.py b/dirsrvtests/suites/paged_results/paged_results_test.py deleted file mode 100644 index 54782bc..0000000 --- a/dirsrvtests/suites/paged_results/paged_results_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_paged_results_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_paged_results_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_paged_results_final(topology): - topology.standalone.delete() - log.info('paged_results test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_paged_results_init(topo) - test_paged_results_(topo) - test_paged_results_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/pam_passthru_plugin/pam_test.py b/dirsrvtests/suites/pam_passthru_plugin/pam_test.py deleted file mode 100644 index 05b55b2..0000000 --- a/dirsrvtests/suites/pam_passthru_plugin/pam_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_pam_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_pam_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_pam_final(topology): - topology.standalone.delete() - log.info('pam test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_pam_init(topo) - test_pam_(topo) - test_pam_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/passthru_plugin/passthru_test.py b/dirsrvtests/suites/passthru_plugin/passthru_test.py deleted file mode 100644 index 1c5d691..0000000 --- a/dirsrvtests/suites/passthru_plugin/passthru_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_passthru_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_passthru_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_passthru_final(topology): - topology.standalone.delete() - log.info('passthru test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_passthru_init(topo) - test_passthru_(topo) - test_passthru_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/password/password_test.py b/dirsrvtests/suites/password/password_test.py deleted file mode 100644 index 3465c2c..0000000 --- a/dirsrvtests/suites/password/password_test.py +++ /dev/null @@ -1,143 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_password_init(topology): - ''' - Do init, if necessary - ''' - - return - - -def test_password_delete_specific_password(topology): - ''' - Delete a specific userpassword, and make sure it is actually deleted from the entry - ''' - - log.info('Running test_password_delete_specific_password...') - - USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX - - # - # Add a test user with a password - # - try: - topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'userpassword': PASSWORD}))) - except ldap.LDAPError as e: - log.fatal('test_password_delete_specific_password: Failed to add test user ' + - USER_DN + ': error ' + e.message['desc']) - assert False - - # - # Delete the exact password - # - try: - topology.standalone.modify_s(USER_DN, [(ldap.MOD_DELETE, 'userpassword', PASSWORD)]) - except ldap.LDAPError as e: - log.fatal('test_password_delete_specific_password: Failed to delete userpassword: error ' + - e.message['desc']) - assert False - - # - # Check the password is actually deleted - # - try: - entry = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, 'objectclass=top') - if entry[0].hasAttr('userpassword'): - log.fatal('test_password_delete_specific_password: Entry incorrectly still have the userpassword attribute') - assert False - except ldap.LDAPError as e: - log.fatal('test_password_delete_specific_password: Failed to search for 
user(%s), error: %s' % - (USER_DN, e.message('desc'))) - assert False - - # - # Cleanup - # - try: - topology.standalone.delete_s(USER_DN) - except ldap.LDAPError as e: - log.fatal('test_password_delete_specific_password: Failed to delete user(%s), error: %s' % - (USER_DN, e.message('desc'))) - assert False - - log.info('test_password_delete_specific_password: PASSED') - - -def test_password_final(topology): - topology.standalone.delete() - log.info('Password test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_password_init(topo) - test_password_delete_specific_password(topo) - test_password_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/password/pwdAdmin_test.py b/dirsrvtests/suites/password/pwdAdmin_test.py deleted file mode 100644 index 2c38756..0000000 --- a/dirsrvtests/suites/password/pwdAdmin_test.py +++ /dev/null @@ -1,447 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None -CONFIG_DN = 'cn=config' -ADMIN_NAME = 'passwd_admin' -ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) -ADMIN2_NAME = 'passwd_admin2' -ADMIN2_DN = 'cn=%s,%s' % (ADMIN2_NAME, SUFFIX) -ADMIN_PWD = 'adminPassword_1' -ADMIN_GROUP_DN = 'cn=password admin group,%s' % (SUFFIX) -ENTRY_NAME = 'Joe Schmo' -ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) -INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_pwdAdmin_init(topology): - ''' - Create our future Password Admin entry, set the password policy, and test - that its working - ''' - - log.info('test_pwdAdmin_init: Creating Password Administator entries...') - - # Add Password Admin 1 - try: - topology.standalone.add_s(Entry((ADMIN_DN, {'objectclass': "top extensibleObject".split(), - 'cn': ADMIN_NAME, - 'userpassword': ADMIN_PWD}))) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin_init: Failed to add test user' + ADMIN_DN + ': error ' + e.message['desc']) - assert False - - # Add Password Admin 2 - try: - topology.standalone.add_s(Entry((ADMIN2_DN, {'objectclass': "top extensibleObject".split(), - 'cn': ADMIN2_NAME, - 'userpassword': ADMIN_PWD}))) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin_init: Failed to add test user ' + ADMIN2_DN + ': error ' + e.message['desc']) - assert False - - # Add Password Admin Group - try: - topology.standalone.add_s(Entry((ADMIN_GROUP_DN, {'objectclass': "top groupOfUNiqueNames".split(), - 'cn': 'password admin group', - 'uniquemember': ADMIN_DN, - 'uniquemember': ADMIN2_DN}))) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin_init: Failed to add group' + ADMIN_GROUP_DN + ': error ' + e.message['desc']) - assert False - - # Configure password policy - log.info('test_pwdAdmin_init: Configuring password policy...') - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'), - (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'), - (ldap.MOD_REPLACE, 'passwordMinCategories', '1'), - (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'), - (ldap.MOD_REPLACE, 'passwordExp', 'on'), - (ldap.MOD_REPLACE, 'passwordMinDigits', '1'), - (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin_init: Failed configure password policy: ' + - e.message['desc']) - assert False - - # - # Add an aci to allow everyone all access (just makes things easier) - # - log.info('Add aci to allow password admin to add/update entries...') - - ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX - ACI_TARGETATTR = "(targetattr = *)" - ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " - ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" - ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - try: - topology.standalone.modify_s(SUFFIX, mod) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin_init: Failed to add aci for password admin: ' + - e.message['desc']) - assert False - - # - # Bind as the future Password Admin - # - log.info('test_pwdAdmin_init: Bind as the Password Administator (before activating)...') - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin_init: Failed to bind as the Password Admin: ' + - e.message['desc']) - assert False - - # - # Setup our test entry, and test password policy is working - # - entry = 
Entry(ENTRY_DN) - entry.setValues('objectclass', 'top', 'person') - entry.setValues('sn', ENTRY_NAME) - entry.setValues('cn', ENTRY_NAME) - - # - # Start by attempting to add an entry with an invalid password - # - log.info('test_pwdAdmin_init: Attempt to add entries with invalid passwords, these adds should fail...') - for passwd in INVALID_PWDS: - failed_as_expected = False - entry.setValues('userpassword', passwd) - log.info('test_pwdAdmin_init: Create a regular user entry %s with password (%s)...' % - (ENTRY_DN, passwd)) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - # We failed as expected - failed_as_expected = True - log.info('test_pwdAdmin_init: Add failed as expected: password (%s) result (%s)' - % (passwd, e.message['desc'])) - - if not failed_as_expected: - log.fatal('test_pwdAdmin_init: We were incorrectly able to add an entry ' + - 'with an invalid password (%s)' % (passwd)) - assert False - - -def test_pwdAdmin(topology): - ''' - Test that password administrators/root DN can - bypass password syntax/policy. - - We need to test how passwords are modified in - existing entries, and when adding new entries. - - Create the Password Admin entry, but do not set - it as an admin yet. Use the entry to verify invalid - passwords are caught. Then activate the password - admin and make sure it can bypass password policy. - ''' - - # - # Now activate a password administator, bind as root dn to do the config - # update, then rebind as the password admin - # - log.info('test_pwdAdmin: Activate the Password Administator...') - - # - # Setup our test entry, and test password policy is working - # - entry = Entry(ENTRY_DN) - entry.setValues('objectclass', 'top', 'person') - entry.setValues('sn', ENTRY_NAME) - entry.setValues('cn', ENTRY_NAME) - - # Bind as Root DN - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + - e.message['desc']) - assert False - - # Set the password admin - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to add password admin to config: ' + - e.message['desc']) - assert False - - # Bind as Password Admin - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' + - e.message['desc']) - assert False - - # - # Start adding entries with invalid passwords, delete the entry after each pass. - # - for passwd in INVALID_PWDS: - entry.setValues('userpassword', passwd) - log.info('test_pwdAdmin: Create a regular user entry %s with password (%s)...' 
% - (ENTRY_DN, passwd)) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to add entry with password (%s) result (%s)' - % (passwd, e.message['desc'])) - assert False - - log.info('test_pwdAdmin: Successfully added entry (%s)' % ENTRY_DN) - - # Delete entry for the next pass - try: - topology.standalone.delete_s(ENTRY_DN) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to delete entry: %s' % - (e.message['desc'])) - assert False - - # - # Add the entry for the next round of testing (modify password) - # - entry.setValues('userpassword', ADMIN_PWD) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to add entry with valid password (%s) result (%s)' % - (passwd, e.message['desc'])) - assert False - - # - # Deactivate the password admin and make sure invalid password updates fail - # - log.info('test_pwdAdmin: Deactivate Password Administator and ' + - 'try invalid password updates...') - - # Bind as root DN - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + - e.message['desc']) - assert False - - # Remove password admin - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to remove password admin from config: ' + - e.message['desc']) - assert False - - # Bind as Password Admin (who is no longer an admin) - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' + - e.message['desc']) - assert False - - # - # Make invalid password updates that should fail - # - for passwd in INVALID_PWDS: - failed_as_expected = False - entry.setValues('userpassword', passwd) - try: - topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) - except ldap.LDAPError as e: - # We failed as expected - failed_as_expected = True - log.info('test_pwdAdmin: Password update failed as expected: password (%s) result (%s)' - % (passwd, e.message['desc'])) - - if not failed_as_expected: - log.fatal('test_pwdAdmin: We were incorrectly able to add an invalid password (%s)' - % (passwd)) - assert False - - # - # Now activate a password administator - # - log.info('test_pwdAdmin: Activate Password Administator and try updates again...') - - # Bind as root DN to make the update - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc']) - assert False - - # Update config - set the password admin - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to add password admin to config: ' + - e.message['desc']) - assert False - - # Bind as Password Admin - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' + - e.message['desc']) - assert False - - # - # Make the same password updates, but this time they should succeed - # - for passwd in INVALID_PWDS: - try: - topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Password update failed unexpectedly: 
password (%s) result (%s)' - % (passwd, e.message['desc'])) - assert False - log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd) - - # - # Test Password Admin Group - # - log.info('test_pwdAdmin: Testing password admin group...') - - # Bind as root DN to make the update - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc']) - assert False - - # Update config - set the password admin group - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_GROUP_DN)]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to add password admin to config: ' + - e.message['desc']) - assert False - - # Bind as admin2 - try: - topology.standalone.simple_bind_s(ADMIN2_DN, ADMIN_PWD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Failed to bind as the Password Admin2: ' + - e.message['desc']) - assert False - - # Make some invalid password updates, but they should succeed - for passwd in INVALID_PWDS: - try: - topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Password update failed unexpectedly: password (%s) result (%s)' - % (passwd, e.message['desc'])) - assert False - log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd) - - # Cleanup - bind as Root DN for the other tests - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc']) - assert False - - -def test_pwdAdmin_config_validation(topology): - ''' - Test config validation: - - - Test adding multiple passwordAdminDN attributes - - Test adding invalid values(non-DN's) - ''' - # Add mulitple attributes - one already eists so just try and add as second one - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', ENTRY_DN)]) - log.fatal('test_pwdAdmin_config_validation: Incorrectly was able to add two config attributes') - assert False - except ldap.LDAPError as e: - log.info('test_pwdAdmin_config_validation: Failed as expected: ' + - e.message['desc']) - - # Attempt to set invalid DN - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', 'ZZZZZ')]) - log.fatal('test_pwdAdmin_config_validation: Incorrectly was able to add invalid DN') - assert False - except ldap.LDAPError as e: - log.info('test_pwdAdmin_config_validation: Failed as expected: ' + - e.message['desc']) - - -def test_pwdAdmin_final(topology): - topology.standalone.delete() - log.info('pwdAdmin test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_pwdAdmin_init(topo) - test_pwdAdmin(topo) - test_pwdAdmin_config_validation(topo) - test_pwdAdmin_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/password/pwdPolicy_test.py b/dirsrvtests/suites/password/pwdPolicy_test.py deleted file mode 100644 index 9ceb62c..0000000 --- a/dirsrvtests/suites/password/pwdPolicy_test.py +++ /dev/null @@ -1,82 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_pwdPolicy_init(topology): - ''' - Init the test suite (if necessary) - ''' - return - - -def test_pwdPolicy_final(topology): - topology.standalone.delete() - log.info('Password Policy test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_pwdPolicy_init(topo) - test_pwdPolicy_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/posix_winsync_plugin/posix_winsync_test.py b/dirsrvtests/suites/posix_winsync_plugin/posix_winsync_test.py deleted file mode 100644 index c50702b..0000000 --- a/dirsrvtests/suites/posix_winsync_plugin/posix_winsync_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_posix_winsync_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_posix_winsync_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_posix_winsync_final(topology): - topology.standalone.delete() - log.info('posix_winsync test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_posix_winsync_init(topo) - test_posix_winsync_(topo) - test_posix_winsync_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/psearch/psearch_test.py b/dirsrvtests/suites/psearch/psearch_test.py deleted file mode 100644 index d68f06d..0000000 --- a/dirsrvtests/suites/psearch/psearch_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_psearch_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_psearch_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_psearch_final(topology): - topology.standalone.delete() - log.info('psearch test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_psearch_init(topo) - test_psearch_(topo) - test_psearch_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/referint_plugin/referint_test.py b/dirsrvtests/suites/referint_plugin/referint_test.py deleted file mode 100644 index 9a96ba6..0000000 --- a/dirsrvtests/suites/referint_plugin/referint_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_referint_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_referint_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_referint_final(topology): - topology.standalone.delete() - log.info('referint test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_referint_init(topo) - test_referint_(topo) - test_referint_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/replication/cleanallruv_test.py b/dirsrvtests/suites/replication/cleanallruv_test.py deleted file mode 100644 index 373269d..0000000 --- a/dirsrvtests/suites/replication/cleanallruv_test.py +++ /dev/null @@ -1,1494 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import threading -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class AddUsers(threading.Thread): - def __init__(self, inst, num_users): - threading.Thread.__init__(self) - self.daemon = True - self.inst = inst - self.num_users = num_users - - def openConnection(self, inst): - # Open a new connection to our LDAP server - server = DirSrv(verbose=False) - args_instance[SER_HOST] = inst.host - args_instance[SER_PORT] = inst.port - args_instance[SER_SERVERID_PROP] = inst.serverid - args_standalone = args_instance.copy() - server.allocate(args_standalone) - server.open() - return server - - def run(self): - # Start adding users - conn = self.openConnection(self.inst) - idx = 0 - - while idx < self.num_users: - USER_DN = 'uid=' + self.inst.serverid + '_' + str(idx) + ',' + DEFAULT_SUFFIX - try: - conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), - 'uid': 'user' + str(idx)}))) - except ldap.UNWILLING_TO_PERFORM: - # One of the masters was probably put into read only mode - just break out - break - except ldap.LDAPError as e: - log.error('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) - assert False - idx += 1 - - conn.close() - - -class TopologyReplication(object): - def __init__(self, master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt): - master1.open() - self.master1 = master1 - master2.open() - self.master2 = master2 - master3.open() - self.master3 = master3 - master4.open() - self.master4 = master4 - - # Store the agreement dn's for future initializations - self.m1_m2_agmt = m1_m2_agmt - self.m1_m3_agmt = m1_m3_agmt - self.m1_m4_agmt = m1_m4_agmt - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating master 1... - master1 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master1.allocate(args_master) - instance_master1 = master1.exists() - if instance_master1: - master1.delete() - master1.create() - master1.open() - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master1.log = log - - # Creating master 2... - master2 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master2.allocate(args_master) - instance_master2 = master2.exists() - if instance_master2: - master2.delete() - master2.create() - master2.open() - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Creating master 3... 
- master3 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_3 - args_instance[SER_PORT] = PORT_MASTER_3 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master3.allocate(args_master) - instance_master3 = master3.exists() - if instance_master3: - master3.delete() - master3.create() - master3.open() - master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3) - - # Creating master 4... - master4 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_4 - args_instance[SER_PORT] = PORT_MASTER_4 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master4.allocate(args_master) - instance_master4 = master4.exists() - if instance_master4: - master4.delete() - master4.create() - master4.open() - master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4) - - # - # Create all the agreements - # - # Creating agreement from master 1 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m1_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m2_agmt) - - # Creating agreement from master 1 to master 3 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) - if not m1_m3_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m3_agmt) - - # Creating agreement from master 1 to master 4 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties) - if not m1_m4_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m4_agmt) - - # Creating agreement from master 2 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m2_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m1_agmt) - - # Creating agreement from master 2 to master 3 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) - if not m2_m3_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m3_agmt) - - # Creating agreement from master 2 to master 4 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties) - if not m2_m4_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m4_agmt) - - # Creating agreement from master 3 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m3_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m3_m1_agmt) - - # Creating agreement from master 3 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m3_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m3_m2_agmt) - - # Creating agreement from master 3 to master 4 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties) - if not m3_m4_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m3_m4_agmt) - - # Creating agreement from master 4 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m4_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m4_m1_agmt) - - # Creating agreement from master 4 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: 
defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m4_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m4_m2_agmt) - - # Creating agreement from master 4 to master 3 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) - if not m4_m3_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m4_m3_agmt) - - # Allow the replicas to get situated with the new agreements - time.sleep(5) - - # - # Initialize all the agreements - # - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(m1_m2_agmt) - master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) - master1.waitForReplInit(m1_m3_agmt) - master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4) - master1.waitForReplInit(m1_m4_agmt) - - # Check replication is working... - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Clear out the tmp dir - master1.clearTmpDir(__file__) - - return TopologyReplication(master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt) - - -def restore_master4(topology): - ''' - In our tests will always be removing master 4, so we need a common - way to restore it for another test - ''' - - log.info('Restoring master 4...') - - # Enable replication on master 4 - topology.master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4) - - # - # Create agreements from master 4 -> m1, m2 ,m3 - # - # Creating agreement from master 4 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m4_m1_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master1.host, - port=topology.master1.port, properties=properties) - if not m4_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m4_m1_agmt) - - # Creating agreement from master 4 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m4_m2_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master2.host, - port=topology.master2.port, properties=properties) - if not m4_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m4_m2_agmt) - - # Creating agreement from master 4 to master 3 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], 
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m4_m3_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master3.host, - port=topology.master3.port, properties=properties) - if not m4_m3_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m4_m3_agmt) - - # - # Create agreements from m1, m2, m3 to master 4 - # - # Creating agreement from master 1 to master 4 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m4_agmt = topology.master1.agreement.create(suffix=SUFFIX, host=topology.master4.host, - port=topology.master4.port, properties=properties) - if not m1_m4_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m4_agmt) - - # Creating agreement from master 2 to master 4 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m4_agmt = topology.master2.agreement.create(suffix=SUFFIX, host=topology.master4.host, - port=topology.master4.port, properties=properties) - if not m2_m4_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m4_agmt) - - # Creating agreement from master 3 to master 4 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m3_m4_agmt = topology.master3.agreement.create(suffix=SUFFIX, host=topology.master4.host, - port=topology.master4.port, properties=properties) - if not m3_m4_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m3_m4_agmt) - - # - # Restart the other servers - this allows the rid(for master4) to be used again/valid - # - topology.master1.restart(timeout=30) - topology.master2.restart(timeout=30) - topology.master3.restart(timeout=30) - topology.master4.restart(timeout=30) - - # - # Initialize the agreements - # - topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - topology.master1.waitForReplInit(topology.m1_m2_agmt) - topology.master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) - topology.master1.waitForReplInit(topology.m1_m3_agmt) - topology.master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4) - topology.master1.waitForReplInit(topology.m1_m4_agmt) - - # - # Test Replication is working - # - # Check replication is working with previous working master(m1 -> m2) - if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2): - log.info('Replication is working m1 -> m2.') - else: - log.fatal('restore_master4: Replication is not working from m1 -> m2.') - assert False - - # Check replication is working from master 1 to master 4... 
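The restore logic above repeats one pattern per replica pair: build an agreement from the standard replication properties, initialize it, and wait for the initialization to finish. A minimal sketch of that pattern, using only calls that already appear in these tests; supplier and consumer are placeholder names for any two masters, not identifiers from the original file.

    # Sketch: one master-to-master agreement, init, and a sanity check.
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    agmt = supplier.agreement.create(suffix=SUFFIX, host=consumer.host,
                                     port=consumer.port, properties=properties)
    supplier.agreement.init(SUFFIX, consumer.host, consumer.port)
    supplier.waitForReplInit(agmt)
    assert supplier.testReplication(DEFAULT_SUFFIX, consumer)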
- if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4): - log.info('Replication is working m1 -> m4.') - else: - log.fatal('restore_master4: Replication is not working from m1 -> m4.') - assert False - - # Check replication is working from master 4 to master1... - if topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1): - log.info('Replication is working m4 -> m1.') - else: - log.fatal('restore_master4: Replication is not working from m4 -> 1.') - assert False - - log.info('Master 4 has been successfully restored.') - - -def test_cleanallruv_init(topology): - ''' - Make updates on each master to make sure we have the all master RUVs on - each master. - ''' - - log.info('Initializing cleanAllRUV test suite...') - - # Master 1 - if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2): - log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 2.') - assert False - - if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master3): - log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 3.') - assert False - - if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4): - log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 4.') - assert False - - # Master 2 - if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master1): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.') - assert False - - if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master3): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.') - assert False - - if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master4): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.') - assert False - - # Master 3 - if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master1): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.') - assert False - - if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master2): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 2.') - assert False - - if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master4): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.') - assert False - - # Master 4 - if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.') - assert False - - if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master2): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 2.') - assert False - - if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master3): - log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.') - assert False - - log.info('Initialized cleanAllRUV test suite.') - - -def test_cleanallruv_clean(topology): - ''' - Disable a master, remove agreements to that master, and clean the RUVs on - the remaining replicas - ''' - - log.info('Running test_cleanallruv_clean...') - - # Disable master 4 - log.info('test_cleanallruv_clean: disable master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('error!') - assert False - - # Remove the 
agreements from the other masters that point to master 4 - log.info('test_cleanallruv_clean: remove all the agreements to master 4...') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Run the task - log.info('test_cleanallruv_clean: run the cleanAllRUV task...') - try: - topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', - args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_clean: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Check the other master's RUV for 'replica 4' - log.info('test_cleanallruv_clean: check all the masters have been cleaned...') - clean = False - count = 0 - while not clean and count < 5: - clean = True - - # Check master 1 - try: - entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean: Master 1 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean: Master 1 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean: Unable to search master 1 for db tombstone: ' + e.message['desc']) - - # Check master 2 - try: - entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean: Master 2 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean: Master 2 is cleaned.') - except ldap.LDAPError as e: - log.fatal('Unable to search master 2 for db tombstone: ' + e.message['desc']) - - # Check master 3 - try: - entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean: Master 3 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean: Master 3 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean: Unable to search master 3 for db tombstone: ' + e.message['desc']) - - # Sleep a bit and give it chance to clean up... 
- time.sleep(5) - count += 1 - - if not clean: - log.fatal('test_cleanallruv_clean: Failed to clean replicas') - assert False - - log.info('Allow cleanallruv threads to finish...') - time.sleep(30) - - log.info('test_cleanallruv_clean PASSED, restoring master 4...') - - # - # Cleanup - restore master 4 - # - restore_master4(topology) - - -def test_cleanallruv_clean_restart(topology): - ''' - Test that if a master istopped during the clean process, that it - resumes and finishes when its started. - ''' - - log.info('Running test_cleanallruv_clean_restart...') - - # Disable master 4 - log.info('test_cleanallruv_clean_restart: disable master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('error!') - assert False - - # Remove the agreements from the other masters that point to master 4 - log.info('test_cleanallruv_clean: remove all the agreements to master 4...') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Stop master 3 to keep the task running, so we can stop master 1... - topology.master3.stop(timeout=30) - - # Run the task - log.info('test_cleanallruv_clean_restart: run the cleanAllRUV task...') - try: - topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', - args={TASK_WAIT: False}) - except ValueError as e: - log.fatal('test_cleanallruv_clean_restart: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Sleep a bit, then stop master 1 - time.sleep(3) - topology.master1.stop(timeout=30) - - # Now start master 3 & 1, and make sure we didn't crash - topology.master3.start(timeout=30) - if topology.master3.detectDisorderlyShutdown(): - log.fatal('test_cleanallruv_clean_restart: Master 3 previously crashed!') - assert False - - topology.master1.start(timeout=30) - if topology.master1.detectDisorderlyShutdown(): - log.fatal('test_cleanallruv_clean_restart: Master 1 previously crashed!') - assert False - - # Wait a little for agmts/cleanallruv to wake up - time.sleep(5) - - # Check the other master's RUV for 'replica 4' - log.info('test_cleanallruv_clean_restart: check all the masters have been cleaned...') - clean = False - count = 0 - while not clean and count < 10: - clean = True - - # Check master 1 - try: - entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean_restart: Master 1 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean_restart: Master 1 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_restart: Unable to search master 1 for db tombstone: ' + - e.message['desc']) - - # Check 
master 2 - try: - entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean_restart: Master 2 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean_restart: Master 2 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_restart: Unable to search master 2 for db tombstone: ' + - e.message['desc']) - - # Check master 3 - try: - entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean_restart: Master 3 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean_restart: Master 3 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_restart: Unable to search master 3 for db tombstone: ' + - e.message['desc']) - - # Sleep a bit and give it chance to clean up... - time.sleep(5) - count += 1 - - if not clean: - log.fatal('Failed to clean replicas') - assert False - - log.info('Allow cleanallruv threads to finish...') - time.sleep(30) - - log.info('test_cleanallruv_clean_restart PASSED, restoring master 4...') - - # - # Cleanup - restore master 4 - # - restore_master4(topology) - - -def test_cleanallruv_clean_force(topology): - ''' - Disable a master, remove agreements to that master, and clean the RUVs on - the remaining replicas - ''' - - log.info('Running test_cleanallruv_clean_force...') - - # Stop master 3, while we update master 4, so that 3 is behind the other masters - topology.master3.stop(timeout=10) - - # Add a bunch of updates to master 4 - m4_add_users = AddUsers(topology.master4, 1500) - m4_add_users.start() - m4_add_users.join() - - # Disable master 4 - log.info('test_cleanallruv_clean_force: disable master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('error!') - assert False - - # Start master 3, it should be out of sync with the other replicas... 
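With master 3 stopped while master 4 keeps taking updates, master 3 falls behind on replica 4's RUV, so the task further down is started with force=True; per the comment in this test, force tells cleanAllRUV not to require that every replica is in sync with replica 4 before cleaning. Roughly, as a sketch of the call used below:

    # Sketch of the forced clean used in test_cleanallruv_clean_force.
    topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4',
                                       force=True, args={TASK_WAIT: True})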
- topology.master3.start(timeout=10) - - # Remove the agreements from the other masters that point to master 4 - log.info('test_cleanallruv_clean_force: remove all the agreements to master 4...') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Run the task, use "force" because master 3 is not in sync with the other replicas - # in regards to the replica 4 RUV - log.info('test_cleanallruv_clean_force: run the cleanAllRUV task...') - try: - topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', - force=True, args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_clean_force: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Check the other master's RUV for 'replica 4' - log.info('test_cleanallruv_clean_force: check all the masters have been cleaned...') - clean = False - count = 0 - while not clean and count < 5: - clean = True - - # Check master 1 - try: - entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean_force: Master 1 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean_force: Master 1 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_force: Unable to search master 1 for db tombstone: ' + - e.message['desc']) - - # Check master 2 - try: - entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean_force: Master 1 not cleaned!') - clean = False - if clean: - log.info('Master 2 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_force: Unable to search master 2 for db tombstone: ' + - e.message['desc']) - - # Check master 3 - try: - entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_clean_force: Master 3 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_clean_force: Master 3 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_clean_force: Unable to search master 3 for db tombstone: ' + - e.message['desc']) - - # Sleep a bit and give it 
chance to clean up... - time.sleep(5) - count += 1 - - if not clean: - log.fatal('test_cleanallruv_clean_force: Failed to clean replicas') - assert False - - log.info('test_cleanallruv_clean_force: Allow cleanallruv threads to finish') - time.sleep(30) - - log.info('test_cleanallruv_clean_force PASSED, restoring master 4...') - - # - # Cleanup - restore master 4 - # - restore_master4(topology) - - -def test_cleanallruv_abort(topology): - ''' - Test the abort task. - - DIsable master 4 - Stop master 2 so that it can not be cleaned - Run the clean task - Wait a bit - Abort the task - Verify task is aborted - ''' - - log.info('Running test_cleanallruv_abort...') - - # Disable master 4 - log.info('test_cleanallruv_abort: disable replication on master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('test_cleanallruv_abort: failed to disable replication') - assert False - - # Remove the agreements from the other masters that point to master 4 - log.info('test_cleanallruv_abort: remove all the agreements to master 4...)') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Stop master 2 - log.info('test_cleanallruv_abort: stop master 2 to freeze the cleanAllRUV task...') - topology.master2.stop(timeout=10) - - # Run the task - log.info('test_cleanallruv_abort: add the cleanAllRUV task...') - try: - (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', args={TASK_WAIT: False}) - except ValueError as e: - log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Wait a bit - time.sleep(10) - - # Abort the task - log.info('test_cleanallruv_abort: abort the cleanAllRUV task...') - try: - topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', - args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_abort: Problem running abortCleanAllRuv task: ' + - e.message('desc')) - assert False - - # Check master 1 does not have the clean task running - log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...') - attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', - 'nsTaskCurrentItem', 'nsTaskTotalItems'] - done = False - count = 0 - while not done and count < 5: - entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist) - if not entry or entry.nsTaskExitCode: - done = True - break - time.sleep(1) - count += 1 - if not done: - log.fatal('test_cleanallruv_abort: CleanAllRUV task was not aborted') - assert False - - # Start master 2 - log.info('test_cleanallruv_abort: start master 2 to begin the restore process...') - topology.master2.start(timeout=10) - - # - # Now run the clean task task again to we can properly restore master 4 - # - log.info('test_cleanallruv_abort: run cleanAllRUV task so we can properly restore master 4...') - try: - 
topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + e.message('desc')) - assert False - - log.info('test_cleanallruv_abort PASSED, restoring master 4...') - - # - # Cleanup - Restore master 4 - # - restore_master4(topology) - - -def test_cleanallruv_abort_restart(topology): - ''' - Test the abort task can handle a restart, and then resume - ''' - - log.info('Running test_cleanallruv_abort_restart...') - - # Disable master 4 - log.info('test_cleanallruv_abort_restart: disable replication on master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('error!') - assert False - - # Remove the agreements from the other masters that point to master 4 - log.info('test_cleanallruv_abort_restart: remove all the agreements to master 4...)') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Stop master 3 - log.info('test_cleanallruv_abort_restart: stop master 3 to freeze the cleanAllRUV task...') - topology.master3.stop(timeout=10) - - # Run the task - log.info('test_cleanallruv_abort_restart: add the cleanAllRUV task...') - try: - (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', args={TASK_WAIT: False}) - except ValueError as e: - log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Wait a bit - time.sleep(5) - - # Abort the task - log.info('test_cleanallruv_abort_restart: abort the cleanAllRUV task...') - try: - topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', - certify=True, args={TASK_WAIT: False}) - except ValueError as e: - log.fatal('test_cleanallruv_abort_restart: Problem running test_cleanallruv_abort_restart task: ' + - e.message('desc')) - assert False - - # Allow task to run for a bit: - time.sleep(5) - - # Check master 1 does not have the clean task running - log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...') - attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', - 'nsTaskCurrentItem', 'nsTaskTotalItems'] - done = False - count = 0 - while not done and count < 10: - entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist) - if not entry or entry.nsTaskExitCode: - done = True - break - time.sleep(1) - count += 1 - if not done: - log.fatal('test_cleanallruv_abort_restart: CleanAllRUV task was not aborted') - assert False - - # Now restart master 1, and make sure the abort process completes - topology.master1.restart(timeout=30) - if topology.master1.detectDisorderlyShutdown(): - log.fatal('test_cleanallruv_abort_restart: Master 1 previously crashed!') - assert False - - # Start master 3 - topology.master3.start(timeout=10) - - # Check master 1 tried to run abort 
task. We expect the abort task to be aborted. - if not topology.master1.searchErrorsLog('Aborting abort task'): - log.fatal('test_cleanallruv_abort_restart: Abort task did not restart') - assert False - - # - # Now run the clean task task again to we can properly restore master 4 - # - log.info('test_cleanallruv_abort_restart: run cleanAllRUV task so we can properly restore master 4...') - try: - topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - log.info('test_cleanallruv_abort_restart PASSED, restoring master 4...') - - # - # Cleanup - Restore master 4 - # - restore_master4(topology) - - -def test_cleanallruv_abort_certify(topology): - ''' - Test the abort task. - - Disable master 4 - Stop master 2 so that it can not be cleaned - Run the clean task - Wait a bit - Abort the task - Verify task is aborted - ''' - - log.info('Running test_cleanallruv_abort_certify...') - - # Disable master 4 - log.info('test_cleanallruv_abort_certify: disable replication on master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('error!') - assert False - - # Remove the agreements from the other masters that point to master 4 - log.info('test_cleanallruv_abort_certify: remove all the agreements to master 4...)') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Stop master 2 - log.info('test_cleanallruv_abort_certify: stop master 2 to freeze the cleanAllRUV task...') - topology.master2.stop(timeout=10) - - # Run the task - log.info('test_cleanallruv_abort_certify: add the cleanAllRUV task...') - try: - (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', args={TASK_WAIT: False}) - except ValueError as e: - log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Abort the task - log.info('test_cleanallruv_abort_certify: abort the cleanAllRUV task...') - try: - (abort_task_dn, rc) = topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', certify=True, args={TASK_WAIT: False}) - except ValueError as e: - log.fatal('test_cleanallruv_abort_certify: Problem running abortCleanAllRuv task: ' + - e.message('desc')) - assert False - - # Wait a while and make sure the abort task is still running - log.info('test_cleanallruv_abort_certify: sleep for 10 seconds') - time.sleep(10) - - attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', - 'nsTaskCurrentItem', 'nsTaskTotalItems'] - entry = topology.master1.getEntry(abort_task_dn, attrlist=attrlist) - if not entry or entry.nsTaskExitCode: - log.fatal('test_cleanallruv_abort_certify: abort task incorrectly finished') - assert False - - # Now start master 2 
so it can be aborted - log.info('test_cleanallruv_abort_certify: start master 2 to allow the abort task to finish...') - topology.master2.start(timeout=10) - - # Wait for the abort task to stop - done = False - count = 0 - while not done and count < 60: - entry = topology.master1.getEntry(abort_task_dn, attrlist=attrlist) - if not entry or entry.nsTaskExitCode: - done = True - break - time.sleep(1) - count += 1 - if not done: - log.fatal('test_cleanallruv_abort_certify: The abort CleanAllRUV task was not aborted') - assert False - - # Check master 1 does not have the clean task running - log.info('test_cleanallruv_abort_certify: check master 1 no longer has a cleanAllRUV task...') - attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', - 'nsTaskCurrentItem', 'nsTaskTotalItems'] - done = False - count = 0 - while not done and count < 5: - entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist) - if not entry or entry.nsTaskExitCode: - done = True - break - time.sleep(1) - count += 1 - if not done: - log.fatal('test_cleanallruv_abort_certify: CleanAllRUV task was not aborted') - assert False - - # Start master 2 - log.info('test_cleanallruv_abort_certify: start master 2 to begin the restore process...') - topology.master2.start(timeout=10) - - # - # Now run the clean task task again to we can properly restore master 4 - # - log.info('test_cleanallruv_abort_certify: run cleanAllRUV task so we can properly restore master 4...') - try: - topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, - replicaid='4', args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - log.info('test_cleanallruv_abort_certify PASSED, restoring master 4...') - - # - # Cleanup - Restore master 4 - # - restore_master4(topology) - - -def test_cleanallruv_stress_clean(topology): - ''' - Put each server(m1 - m4) under stress, and perform the entire clean process - ''' - log.info('Running test_cleanallruv_stress_clean...') - log.info('test_cleanallruv_stress_clean: put all the masters under load...') - - # Put all the masters under load - m1_add_users = AddUsers(topology.master1, 4000) - m1_add_users.start() - m2_add_users = AddUsers(topology.master2, 4000) - m2_add_users.start() - m3_add_users = AddUsers(topology.master3, 4000) - m3_add_users.start() - m4_add_users = AddUsers(topology.master4, 4000) - m4_add_users.start() - - # Allow sometime to get replication flowing in all directions - log.info('test_cleanallruv_stress_clean: allow some time for replication to get flowing...') - time.sleep(5) - - # Put master 4 into read only mode - log.info('test_cleanallruv_stress_clean: put master 4 into read-only mode...') - try: - topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')]) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' + - e.message['desc']) - assert False - - # We need to wait for master 4 to push its changes out - log.info('test_cleanallruv_stress_clean: allow some time for master 4 to push changes out (30 seconds)...') - time.sleep(30) - - # Disable master 4 - log.info('test_cleanallruv_stress_clean: disable replication on master 4...') - try: - topology.master4.replica.disableReplication(DEFAULT_SUFFIX) - except: - log.fatal('test_cleanallruv_stress_clean: failed to diable replication') - assert False - - # Remove the agreements from the other masters that point to master 4 
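The abort tests above all reduce to the same polling idiom: launch the abort task without waiting, then read the task entry until nsTaskExitCode is set. A condensed sketch built only from calls already used in this file (tasks.abortCleanAllRUV, getEntry); the 60-iteration cap mirrors the longest wait used above.

    # Sketch: abort a cleanAllRUV run and poll the task entry until it exits.
    (task_dn, rc) = topology.master1.tasks.abortCleanAllRUV(
        suffix=DEFAULT_SUFFIX, replicaid='4', certify=True,
        args={TASK_WAIT: False})
    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
                'nsTaskCurrentItem', 'nsTaskTotalItems']
    for _ in range(60):
        entry = topology.master1.getEntry(task_dn, attrlist=attrlist)
        if not entry or entry.nsTaskExitCode:
            break          # task entry gone or exit code set: task finished
        time.sleep(1)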
- log.info('test_cleanallruv_stress_clean: remove all the agreements to master 4...') - try: - topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m1 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m2 -> m4), error: ' + - e.message['desc']) - assert False - try: - topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m3 -> m4), error: ' + - e.message['desc']) - assert False - - # Run the task - log.info('test_cleanallruv_stress_clean: Run the cleanAllRUV task...') - try: - topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', - args={TASK_WAIT: True}) - except ValueError as e: - log.fatal('test_cleanallruv_stress_clean: Problem running cleanAllRuv task: ' + - e.message('desc')) - assert False - - # Wait for the update to finish - log.info('test_cleanallruv_stress_clean: wait for all the updates to finish...') - m1_add_users.join() - m2_add_users.join() - m3_add_users.join() - m4_add_users.join() - - # Check the other master's RUV for 'replica 4' - log.info('test_cleanallruv_stress_clean: check if all the replicas have been cleaned...') - clean = False - count = 0 - while not clean and count < 10: - clean = True - - # Check master 1 - try: - entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_stress_clean: Master 1 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_stress_clean: Master 1 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Unable to search master 1 for db tombstone: ' + - e.message['desc']) - - # Check master 2 - try: - entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_stress_clean: Master 2 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_stress_clean: Master 2 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Unable to search master 2 for db tombstone: ' + - e.message['desc']) - - # Check master 3 - try: - entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) - if not entry: - log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master') - repl_fail(replica_inst) - elements = entry[0].getValues('nsds50ruv') - for ruv in elements: - if 'replica 4' in ruv: - # Not cleaned - log.error('test_cleanallruv_stress_clean: Master 3 not cleaned!') - clean = False - if clean: - log.info('test_cleanallruv_stress_clean: Master 3 is cleaned.') - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Unable to search master 3 for db tombstone: ' + - e.message['desc']) 
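Each clean test repeats the per-master RUV check three times; the whole check reduces to one small helper. A sketch using only calls from the code above (search_s with REPLICA_RUV_FILTER and getValues('nsds50ruv')); ruv_is_cleaned is an illustrative name, not part of the original suite.

    # Sketch: True once 'replica 4' no longer appears in this master's RUV.
    def ruv_is_cleaned(master, rid_text='replica 4'):
        entry = master.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                REPLICA_RUV_FILTER)
        if not entry:
            return False   # RUV tombstone entry not found yet
        return all(rid_text not in ruv
                   for ruv in entry[0].getValues('nsds50ruv'))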
- - # Sleep a bit and give it chance to clean up... - time.sleep(5) - count += 1 - - if not clean: - log.fatal('test_cleanallruv_stress_clean: Failed to clean replicas') - assert False - - log.info('test_cleanallruv_stress_clean: PASSED, restoring master 4...') - - # - # Cleanup - restore master 4 - # - - # Turn off readonly mode - try: - topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')]) - except ldap.LDAPError as e: - log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' + - e.message['desc']) - assert False - - restore_master4(topology) - - -def test_cleanallruv_final(topology): - topology.master1.delete() - topology.master2.delete() - topology.master3.delete() - topology.master4.delete() - log.info('cleanAllRUV test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - topo = topology(True) - - test_cleanallruv_init(topo) - test_cleanallruv_clean(topo) - test_cleanallruv_clean_restart(topo) - test_cleanallruv_clean_force(topo) - test_cleanallruv_abort(topo) - test_cleanallruv_abort_restart(topo) - test_cleanallruv_abort_certify(topo) - test_cleanallruv_stress_clean(topo) - test_cleanallruv_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/suites/replication/wait_for_async_feature_test.py deleted file mode 100644 index 4905088..0000000 --- a/dirsrvtests/suites/replication/wait_for_async_feature_test.py +++ /dev/null @@ -1,280 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * -from collections import Counter - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -WAITFOR_ASYNC_ATTR = "nsDS5ReplicaWaitForAsyncResults" - -class TopologyReplication(object): - def __init__(self, master1, master2, m1_m2_agmt, m2_m1_agmt): - master1.open() - master2.open() - self.masters = ((master1, m1_m2_agmt), - (master2, m2_m1_agmt)) - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating master 1... - master1 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master1.allocate(args_master) - instance_master1 = master1.exists() - if instance_master1: - master1.delete() - master1.create() - master1.open() - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - - # Creating master 2... 
- master2 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master2.allocate(args_master) - instance_master2 = master2.exists() - if instance_master2: - master2.delete() - master2.create() - master2.open() - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # - # Create all the agreements - # - # Creating agreement from master 1 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m1_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m2_agmt) - - # Creating agreement from master 2 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m2_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m1_agmt) - - # Allow the replicas to get situated with the new agreements... - time.sleep(5) - - # - # Initialize all the agreements - # - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(m1_m2_agmt) - master2.agreement.init(SUFFIX, HOST_MASTER_1, PORT_MASTER_1) - master2.waitForReplInit(m2_m1_agmt) - - # Check replication is working... 
- if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - log.info("Set Replication Debugging loglevel for the errorlog") - master1.setLogLevel(lib389.LOG_REPLICA) - master2.setLogLevel(lib389.LOG_REPLICA) - - # Delete each instance in the end - def fin(): - master1.delete() - master2.delete() - request.addfinalizer(fin) - - # Clear out the tmp dir - master1.clearTmpDir(__file__) - - return TopologyReplication(master1, master2, m1_m2_agmt, m2_m1_agmt) - - -@pytest.fixture(params=[(None, (4, 10)), - ('2000', (0, 1)), - ('0', (4, 10)), - ('-5', (4, 10))]) -def waitfor_async_attr(topology, request): - """Sets attribute on all replicas""" - - attr_value = request.param[0] - expected_result = request.param[1] - - # Run through all masters - for master in topology.masters: - agmt = master[1] - try: - if attr_value: - log.info("Set %s: %s on %s" % ( - WAITFOR_ASYNC_ATTR, attr_value, master[0].serverid)) - mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, attr_value)] - else: - log.info("Delete %s from %s" % ( - WAITFOR_ASYNC_ATTR, master[0].serverid)) - mod = [(ldap.MOD_DELETE, WAITFOR_ASYNC_ATTR, None)] - master[0].modify_s(agmt, mod) - except ldap.LDAPError as e: - log.error('Failed to set or delete %s attribute: (%s)' % ( - WAITFOR_ASYNC_ATTR, e.message['desc'])) - - return (attr_value, expected_result) - - -@pytest.fixture -def entries(topology, request): - """Adds entries to the master1""" - - master1 = topology.masters[0][0] - - TEST_OU = "test" - test_dn = SUFFIX - test_list = [] - - log.info("Add 100 nested entries under replicated suffix on %s" % master1.serverid) - for i in xrange(100): - test_dn = 'ou=%s%s,%s' % (TEST_OU, i, test_dn) - test_list.insert(0, test_dn) - try: - master1.add_s(Entry((test_dn, - {'objectclass': 'top', - 'objectclass': 'organizationalUnit', - 'ou': TEST_OU}))) - except ldap.LDAPError as e: - log.error('Failed to add entry (%s): error (%s)' % (test_dn, - e.message['desc'])) - assert False - - log.info("Delete created entries") - for test_dn in test_list: - try: - master1.delete_s(test_dn) - except ldap.LDAPError, e: - log.error('Failed to delete entry (%s): error (%s)' % (test_dn, - e.message['desc'])) - assert False - - def fin(): - log.info("Clear the errors log in the end of the test case") - with open(master1.errlog, 'w') as errlog: - errlog.writelines("") - request.addfinalizer(fin) - - -def test_not_int_value(topology): - """Tests not integer value""" - - master1 = topology.masters[0][0] - agmt = topology.masters[0][1] - - log.info("Try to set %s: wv1" % WAITFOR_ASYNC_ATTR) - try: - mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, "wv1")] - master1.modify_s(agmt, mod) - except ldap.LDAPError as e: - assert e.message['desc'] == 'Invalid syntax' - - -def test_multi_value(topology): - """Tests multi value""" - - master1 = topology.masters[0][0] - agmt = topology.masters[0][1] - log.info("agmt: %s" % agmt) - - log.info("Try to set %s: 100 and 101 in the same time (multi value test)" % ( - WAITFOR_ASYNC_ATTR)) - try: - mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "100")] - master1.modify_s(agmt, mod) - mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "101")] - master1.modify_s(agmt, mod) - except ldap.LDAPError as e: - assert e.message['desc'] == 'Object class violation' - - -def test_value_check(topology, waitfor_async_attr): - """Checks that value has been set correctly""" - - attr_value = waitfor_async_attr[0] - - for master in topology.masters: - agmt = master[1] - - 
log.info("Check attr %s on %s" % (WAITFOR_ASYNC_ATTR, master[0].serverid)) - try: - if attr_value: - entry = master[0].search_s(agmt, ldap.SCOPE_BASE, "%s=%s" % ( - WAITFOR_ASYNC_ATTR, attr_value)) - assert entry - else: - entry = master[0].search_s(agmt, ldap.SCOPE_BASE, "%s=*" % WAITFOR_ASYNC_ATTR) - assert not entry - except ldap.LDAPError as e: - log.fatal('Search failed, error: ' + e.message['desc']) - assert False - - -def test_behavior_with_value(topology, waitfor_async_attr, entries): - """Tests replication behavior with valid - nsDS5ReplicaWaitForAsyncResults attribute values - """ - - master1 = topology.masters[0][0] - sync_dict = Counter() - min_ap = waitfor_async_attr[1][0] - max_ap = waitfor_async_attr[1][1] - - log.info("Gather all sync attempts within Counter dict, group by timestamp") - with open(master1.errlog, 'r') as errlog: - errlog_filtered = filter(lambda x: "waitfor_async_results" in x, errlog) - for line in errlog_filtered: - # Watch only over unsuccessful sync attempts - if line.split()[4] != line.split()[5]: - timestamp = line.split(']')[0] - sync_dict[timestamp] += 1 - - log.info("Take the most common timestamp and assert it has appeared " \ - "in the range from %s to %s times" % (min_ap, max_ap)) - most_common_val = sync_dict.most_common(1)[0][1] - assert min_ap <= most_common_val <= max_ap - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/suites/replsync_plugin/repl_sync_test.py b/dirsrvtests/suites/replsync_plugin/repl_sync_test.py deleted file mode 100644 index bd52fb6..0000000 --- a/dirsrvtests/suites/replsync_plugin/repl_sync_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_repl_sync_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_repl_sync_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_repl_sync_final(topology): - topology.standalone.delete() - log.info('repl_sync test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_repl_sync_init(topo) - test_repl_sync_(topo) - test_repl_sync_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/resource_limits/res_limits_test.py b/dirsrvtests/suites/resource_limits/res_limits_test.py deleted file mode 100644 index 672bebc..0000000 --- a/dirsrvtests/suites/resource_limits/res_limits_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_res_limits_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_res_limits_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_res_limits_final(topology): - topology.standalone.delete() - log.info('res_limits test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_res_limits_init(topo) - test_res_limits_(topo) - test_res_limits_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/retrocl_plugin/retrocl_test.py b/dirsrvtests/suites/retrocl_plugin/retrocl_test.py deleted file mode 100644 index 2d8b61f..0000000 --- a/dirsrvtests/suites/retrocl_plugin/retrocl_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_retrocl_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_retrocl_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_retrocl_final(topology): - topology.standalone.delete() - log.info('retrocl test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_retrocl_init(topo) - test_retrocl_(topo) - test_retrocl_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/reverpwd_plugin/reverpwd_test.py b/dirsrvtests/suites/reverpwd_plugin/reverpwd_test.py deleted file mode 100644 index ae79bb5..0000000 --- a/dirsrvtests/suites/reverpwd_plugin/reverpwd_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_reverpwd_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_reverpwd_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_reverpwd_final(topology): - topology.standalone.delete() - log.info('reverpwd test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_reverpwd_init(topo) - test_reverpwd_(topo) - test_reverpwd_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/roles_plugin/roles_test.py b/dirsrvtests/suites/roles_plugin/roles_test.py deleted file mode 100644 index 704f2b7..0000000 --- a/dirsrvtests/suites/roles_plugin/roles_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_roles_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_roles_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_roles_final(topology): - topology.standalone.delete() - log.info('roles test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_roles_init(topo) - test_roles_(topo) - test_roles_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py b/dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py deleted file mode 100644 index 2e70656..0000000 --- a/dirsrvtests/suites/rootdn_plugin/rootdn_plugin_test.py +++ /dev/null @@ -1,778 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -PLUGIN_DN = 'cn=' + PLUGIN_ROOTDN_ACCESS + ',cn=plugins,cn=config' -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_rootdn_init(topology): - ''' - Initialize our setup to test the ROot DN Access Control Plugin - - Test the following access control type: - - - Allowed IP address * - - Denied IP address * - - Specific time window - - Days allowed access - - Allowed host * - - Denied host * - - * means mulitple valued - ''' - - log.info('Initializing root DN test suite...') - - # - # Set an aci so we can modify the plugin after we deny the Root DN - # - ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0' + - ';acl "all access";allow (all)(userdn="ldap:///anyone");)') - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_init: Failed to add aci to config: error ' + - e.message['desc']) - assert False - - # - # Create a user to modify the config - # - try: - topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'uid': 'user1', - 'userpassword': PASSWORD}))) - except ldap.LDAPError as e: - log.fatal('test_rootdn_init: Failed to add test user ' + USER1_DN + ': error ' + - e.message['desc']) - assert False - - # - # Enable dynamic plugins - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - 
log.fatal('test_rootdn_init: Failed to set dynamic plugins: error ' + e.message['desc']) - assert False - - # - # Enable the plugin (aftewr enabling dynamic plugins) - # - topology.standalone.plugins.enable(PLUGIN_ROOTDN_ACCESS) - - log.info('test_rootdn_init: Initialized root DN test suite.') - - -def test_rootdn_access_specific_time(topology): - ''' - Test binding inside and outside of a specific time - ''' - - log.info('Running test_rootdn_access_specific_time...') - - # Get the current time, and bump it ahead twohours - current_hour = time.strftime("%H") - if int(current_hour) > 12: - open_time = '0200' - close_time = '0400' - else: - open_time = '1600' - close_time = '1800' - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time), - (ldap.MOD_ADD, 'rootdn-close-time', close_time)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' + - e.message['desc']) - assert False - - # - # Bind as Root DN - should fail - # - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - succeeded = True - except ldap.LDAPError as e: - succeeded = False - - if succeeded: - log.fatal('test_rootdn_access_specific_time: Root DN was incorrectly able to bind') - assert False - - # - # Set config to allow the entire day - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_specific_time: test_rootdn: failed to bind as user1') - assert False - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), - (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_specific_time: Failed to set (open) open/close times: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_specific_time: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - # - # Cleanup - undo the changes we made so the next test has a clean slate - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None), - (ldap.MOD_DELETE, 'rootdn-close-time', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_specific_time: Failed to delete open and close time: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_specific_time: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - log.info('test_rootdn_access_specific_time: PASSED') - - -def test_rootdn_access_day_of_week(topology): - ''' - Test the days of week feature - ''' - - log.info('Running test_rootdn_access_day_of_week...') - - days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat') - day = int(time.strftime("%w", time.gmtime())) - - if day > 3: - deny_days = days[0] + ', ' + days[1] - allow_days = days[day] + ',' + days[day - 1] - else: - deny_days = days[4] + ',' + days[5] - allow_days = days[day] + ',' + days[day + 1] - - # - # Set the deny days - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', - deny_days)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' + - e.message['desc']) - assert False - - # - # Bind as Root DN - should fail - # - try: - 
topology.standalone.simple_bind_s(DN_DM, PASSWORD) - succeeded = True - except ldap.LDAPError as e: - succeeded = False - - if succeeded: - log.fatal('test_rootdn_access_day_of_week: Root DN was incorrectly able to bind') - assert False - - # - # Set the allow days - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_day_of_week: : failed to bind as user1') - assert False - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', - allow_days)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_day_of_week: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - # - # Cleanup - undo the changes we made so the next test has a clean slate - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-days-allowed', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_day_of_week: Failed to set rootDN plugin config: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_day_of_week: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - log.info('test_rootdn_access_day_of_week: PASSED') - - -def test_rootdn_access_denied_ip(topology): - ''' - Test denied IP feature - we can just test denying 127.0.01 - ''' - - log.info('Running test_rootdn_access_denied_ip...') - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '127.0.0.1'), - (ldap.MOD_ADD, 'rootdn-deny-ip', '::1')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + - e.message['desc']) - assert False - - # - # Bind as Root DN - should fail - # - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - succeeded = True - except ldap.LDAPError as e: - succeeded = False - - if succeeded: - log.fatal('test_rootdn_access_denied_ip: Root DN was incorrectly able to bind') - assert False - - # - # Change the denied IP so root DN succeeds - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: : failed to bind as user1') - assert False - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - # - # Cleanup - undo the changes we made so the next test has a clean slate - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-ip', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: Root DN bind failed unexpectedly failed: 
error ' + - e.message['desc']) - assert False - - log.info('test_rootdn_access_denied_ip: PASSED') - - -def test_rootdn_access_denied_host(topology): - ''' - Test denied Host feature - we can just test denying localhost - ''' - - log.info('Running test_rootdn_access_denied_host...') - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-deny-host', 'localhost.localdomain')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' + - e.message['desc']) - assert False - - # - # Bind as Root DN - should fail - # - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - succeeded = True - except ldap.LDAPError as e: - succeeded = False - - if succeeded: - log.fatal('test_rootdn_access_denied_host: Root DN was incorrectly able to bind') - assert False - - # - # Change the denied host so root DN succeeds - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_host: : failed to bind as user1') - assert False - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'i.dont.exist.com')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_host: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - # - # Cleanup - undo the changes we made so the next test has a clean slate - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-host', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_host: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - log.info('test_rootdn_access_denied_host: PASSED') - - -def test_rootdn_access_allowed_ip(topology): - ''' - Test allowed ip feature - ''' - - log.info('Running test_rootdn_access_allowed_ip...') - - # - # Set allowed host to an unknown host - blocks the Root DN - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '255.255.255.255')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' + - e.message['desc']) - assert False - - # - # Bind as Root DN - should fail - # - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - succeeded = True - except ldap.LDAPError as e: - succeeded = False - - if succeeded: - log.fatal('test_rootdn_access_allowed_ip: Root DN was incorrectly able to bind') - assert False - - # - # Allow localhost - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_ip: : failed to bind as user1') - assert False - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'), - (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - 
log.fatal('test_rootdn_access_allowed_ip: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - # - # Cleanup - undo everything we did so the next test has a clean slate - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_ip: Failed to delete(rootdn-allow-ip): error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_ip: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - log.info('test_rootdn_access_allowed_ip: PASSED') - - -def test_rootdn_access_allowed_host(topology): - ''' - Test allowed ip feature - ''' - - log.info('Running test_rootdn_access_allowed_host...') - - # - # Set allowed host to an unknown host - blocks the Root DN - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'i.dont.exist.com')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' + - e.message['desc']) - assert False - - # - # Bind as Root DN - should fail - # - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - succeeded = True - except ldap.LDAPError as e: - succeeded = False - - if succeeded: - log.fatal('test_rootdn_access_allowed_host: Root DN was incorrectly able to bind') - assert False - - # - # Allow localhost - # - try: - topology.standalone.simple_bind_s(USER1_DN, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_host: : failed to bind as user1') - assert False - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-allow-host', 'localhost.localdomain')]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_host: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - # - # Cleanup - undo everything we did so the next test has a clean slate - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-host', None)]) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_host: Failed to delete(rootdn-allow-host): error ' + - e.message['desc']) - assert False - - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - log.fatal('test_rootdn_access_allowed_host: Root DN bind failed unexpectedly failed: error ' + - e.message['desc']) - assert False - - log.info('test_rootdn_access_allowed_host: PASSED') - - -def test_rootdn_config_validate(topology): - ''' - Test configuration validation - - test single valued attributes: rootdn-open-time, - rootdn-close-time, - rootdn-days-allowed - - ''' - - log.info('Running test_rootdn_config_validate...') - - # - # Test rootdn-open-time - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to just add "rootdn-open-time" ') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'), - (ldap.MOD_ADD, 'rootdn-open-time', '0001')]) - log.fatal('test_rootdn_config_validate: Incorrectly 
allowed to add multiple "rootdn-open-time"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'), - (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: -1"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'), - (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: 2400"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'), - (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: aaaaa"') - assert False - except ldap.LDAPError: - pass - - # - # Test rootdn-close-time - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add just "rootdn-close-time"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'), - (ldap.MOD_ADD, 'rootdn-close-time', '0001')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), - (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: -1"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), - (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: 2400"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), - (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: aaaaa"') - assert False - except ldap.LDAPError: - pass - - # - # Test days allowed - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'), - (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add two "rootdn-days-allowed"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Mon1"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Tue, Mon1"') - assert False - except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: 111"') - assert False - 
except ldap.LDAPError: - pass - - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Gur"') - assert False - except ldap.LDAPError: - pass - - # - # Test allow ips - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-ip: 12.12.Z.12"') - assert False - except ldap.LDAPError: - pass - - # - # Test deny ips - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-ip: 12.12.Z.12"') - assert False - except ldap.LDAPError: - pass - - # - # Test allow hosts - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-host: host._.com"') - assert False - except ldap.LDAPError: - pass - - # - # Test deny hosts - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')]) - log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-host: host.####.com"') - assert False - except ldap.LDAPError: - pass - - log.info('test_rootdn_config_validate: PASSED') - - -def test_rootdn_final(topology): - topology.standalone.delete() - log.info('Root DN Access Control test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_rootdn_init(topo) - test_rootdn_access_specific_time(topo) - test_rootdn_access_day_of_week(topo) - test_rootdn_access_allowed_ip(topo) - test_rootdn_access_denied_ip(topo) - test_rootdn_access_allowed_host(topo) - test_rootdn_access_denied_host(topo) - test_rootdn_config_validate(topo) - - test_rootdn_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/sasl/sasl_test.py b/dirsrvtests/suites/sasl/sasl_test.py deleted file mode 100644 index 2f5e18c..0000000 --- a/dirsrvtests/suites/sasl/sasl_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_sasl_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_sasl_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_sasl_final(topology): - topology.standalone.delete() - log.info('sasl test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_sasl_init(topo) - test_sasl_(topo) - test_sasl_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/schema/test_schema.py b/dirsrvtests/suites/schema/test_schema.py deleted file mode 100644 index f23391a..0000000 --- a/dirsrvtests/suites/schema/test_schema.py +++ /dev/null @@ -1,228 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Dec 18, 2013 - -@author: rmeggins -''' -import os -import sys -import time -import ldap -import six -from ldap.cidict import cidict -from ldap.schema import SubSchema -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation_prefix = None - -attrclass = ldap.schema.models.AttributeType -occlass = ldap.schema.models.ObjectClass -syntax_len_supported = False - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a DirSrv instance for the 'module'. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - schemainst = DirSrv(verbose=False) - - # Args for the master instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - schemainst.allocate(args_instance) - - # Remove all the instance - if schemainst.exists(): - schemainst.delete() - - # Create the instance - schemainst.create() - schemainst.open() - - return TopologyStandalone(schemainst) - - -def ochasattr(subschema, oc, mustormay, attr, key): - """See if the oc and any of its parents and ancestors have the - given attr""" - rc = False - if not key in oc.__dict__: - dd = cidict() - for ii in oc.__dict__[mustormay]: - dd[ii] = ii - oc.__dict__[key] = dd - if attr in oc.__dict__[key]: - rc = True - else: - # look in parents - for noroid in oc.sup: - ocpar = subschema.get_obj(occlass, noroid) - assert(ocpar) - rc = ochasattr(subschema, ocpar, mustormay, attr, key) - if rc: - break - return rc - - -def ochasattrs(subschema, oc, mustormay, attrs): - key = mustormay + "dict" - ret = [] - for attr in attrs: - if not ochasattr(subschema, oc, mustormay, attr, key): - ret.append(attr) - return ret - - -def mycmp(v1, v2): - v1ary, v2ary = [v1], [v2] - if isinstance(v1, list) or isinstance(v1, tuple): - v1ary, v2ary = list(set([x.lower() for x in v1])), list(set([x.lower() for x in v2])) - if not len(v1ary) == len(v2ary): - return False - for v1, v2 in zip(v1ary, v2ary): - if isinstance(v1, six.string_types): - if not len(v1) == len(v2): - return False - if not v1 == v2: - return False - return True - - -def ocgetdiffs(ldschema, oc1, oc2): - fields = ['obsolete', 'names', 'desc', 'must', 'may', 'kind', 'sup'] - ret = '' - for field in fields: - v1, v2 = oc1.__dict__[field], oc2.__dict__[field] - if field == 'may' or field == 'must': - missing = ochasattrs(ldschema, oc1, field, oc2.__dict__[field]) - if missing: - ret = ret + '\t%s is missing %s\n' % (field, missing) - missing = ochasattrs(ldschema, oc2, field, oc1.__dict__[field]) - if missing: - ret = ret + '\t%s is missing %s\n' % (field, missing) - elif not mycmp(v1, v2): - ret = ret + '\t%s differs: [%s] vs. [%s]\n' % (field, oc1.__dict__[field], oc2.__dict__[field]) - return ret - - -def atgetparfield(subschema, at, field): - v = None - for nameoroid in at.sup: - atpar = subschema.get_obj(attrclass, nameoroid) - assert(atpar) - v = atpar.__dict__.get(field, atgetparfield(subschema, atpar, field)) - if v is not None: - break - return v - - -def atgetdiffs(ldschema, at1, at2): - fields = ['names', 'desc', 'obsolete', 'sup', 'equality', 'ordering', 'substr', 'syntax', - 'single_value', 'collective', 'no_user_mod', 'usage'] - if syntax_len_supported: - fields.append('syntax_len') - ret = '' - for field in fields: - v1 = at1.__dict__.get(field) or atgetparfield(ldschema, at1, field) - v2 = at2.__dict__.get(field) or atgetparfield(ldschema, at2, field) - if not mycmp(v1, v2): - ret = ret + '\t%s differs: [%s] vs. 
[%s]\n' % (field, at1.__dict__[field], at2.__dict__[field]) - return ret - - -def test_schema_comparewithfiles(topology): - '''Compare the schema from ldap cn=schema with the schema files''' - - log.info('Running test_schema_comparewithfiles...') - - retval = True - schemainst = topology.standalone - ldschema = schemainst.schema.get_subschema() - assert ldschema - for fn in schemainst.schema.list_files(): - fschema = schemainst.schema.file_to_subschema(fn) - if not fschema: - log.warn("Unable to parse %s as a schema file - skipping" % fn) - continue - assert fschema - for oid in fschema.listall(occlass): - se = fschema.get_obj(occlass, oid) - assert se - ldse = ldschema.get_obj(occlass, oid) - if not ldse: - log.error("objectclass in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) - retval = False - continue - ret = ocgetdiffs(ldschema, ldse, se) - if ret: - log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) - retval = False - for oid in fschema.listall(attrclass): - se = fschema.get_obj(attrclass, oid) - assert se - ldse = ldschema.get_obj(attrclass, oid) - if not ldse: - log.error("attributetype in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) - retval = False - continue - ret = atgetdiffs(ldschema, ldse, se) - if ret: - log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) - retval = False - assert retval - - log.info('test_schema_comparewithfiles: PASSED') - - -def test_schema_final(topology): - topology.standalone.delete() - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = os.environ.get('PREFIX') - - topo = topology(True) - - test_schema_comparewithfiles(topo) - - test_schema_final(topo) - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/schema_reload_plugin/schema_reload_test.py b/dirsrvtests/suites/schema_reload_plugin/schema_reload_test.py deleted file mode 100644 index c516745..0000000 --- a/dirsrvtests/suites/schema_reload_plugin/schema_reload_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_schema_reload_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_schema_reload_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_schema_reload_final(topology): - topology.standalone.delete() - log.info('schema_reload test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_schema_reload_init(topo) - test_schema_reload_(topo) - test_schema_reload_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/snmp/snmp_test.py b/dirsrvtests/suites/snmp/snmp_test.py deleted file mode 100644 index a442efc..0000000 --- a/dirsrvtests/suites/snmp/snmp_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_snmp_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_snmp_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_snmp_final(topology): - topology.standalone.delete() - log.info('snmp test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_snmp_init(topo) - test_snmp_(topo) - test_snmp_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/ssl/ssl_test.py b/dirsrvtests/suites/ssl/ssl_test.py deleted file mode 100644 index d0b36b5..0000000 --- a/dirsrvtests/suites/ssl/ssl_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ssl_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_ssl_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_ssl_final(topology): - topology.standalone.delete() - log.info('ssl test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ssl_init(topo) - test_ssl_(topo) - test_ssl_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/syntax_plugin/syntax_test.py b/dirsrvtests/suites/syntax_plugin/syntax_test.py deleted file mode 100644 index 8f801ca..0000000 --- a/dirsrvtests/suites/syntax_plugin/syntax_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_syntax_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_syntax_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_syntax_final(topology): - topology.standalone.delete() - log.info('syntax test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_syntax_init(topo) - test_syntax_(topo) - test_syntax_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/usn_plugin/usn_test.py b/dirsrvtests/suites/usn_plugin/usn_test.py deleted file mode 100644 index bd57835..0000000 --- a/dirsrvtests/suites/usn_plugin/usn_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_usn_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_usn_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_usn_final(topology): - topology.standalone.delete() - log.info('usn test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_usn_init(topo) - test_usn_(topo) - test_usn_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/views_plugin/views_test.py b/dirsrvtests/suites/views_plugin/views_test.py deleted file mode 100644 index 28afcc8..0000000 --- a/dirsrvtests/suites/views_plugin/views_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_views_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_views_(topology): - ''' - Write a single test here... 
- ''' - - return - - -def test_views_final(topology): - topology.standalone.delete() - log.info('views test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_views_init(topo) - test_views_(topo) - test_views_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/vlv/vlv_test.py b/dirsrvtests/suites/vlv/vlv_test.py deleted file mode 100644 index ee8b86e..0000000 --- a/dirsrvtests/suites/vlv/vlv_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_vlv_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_vlv_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_vlv_final(topology): - topology.standalone.delete() - log.info('vlv test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_vlv_init(topo) - test_vlv_(topo) - test_vlv_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/whoami_plugin/whoami_test.py b/dirsrvtests/suites/whoami_plugin/whoami_test.py deleted file mode 100644 index af6f19f..0000000 --- a/dirsrvtests/suites/whoami_plugin/whoami_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_whoami_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_whoami_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_whoami_final(topology): - topology.standalone.delete() - log.info('whoami test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_whoami_init(topo) - test_whoami_(topo) - test_whoami_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tests/data/README b/dirsrvtests/tests/data/README new file mode 100644 index 0000000..4261f92 --- /dev/null +++ b/dirsrvtests/tests/data/README @@ -0,0 +1,11 @@ +DATA DIRECTORY README + +This directory is used for storing LDIF files used by the dirsrvtests scripts. +This directory can be retrieved via getDir() from the DirSrv class. 
+ +Example: + + data_dir_path = topology.standalone.getDir(__file__, DATA_DIR) + + ldif_file = data_dir_path + "ticket44444/1000entries.ldif" + diff --git a/dirsrvtests/tests/data/basic/dse.ldif.broken b/dirsrvtests/tests/data/basic/dse.ldif.broken new file mode 100644 index 0000000..489b443 --- /dev/null +++ b/dirsrvtests/tests/data/basic/dse.ldif.broken @@ -0,0 +1,95 @@ +dn: +objectClass: top +aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow( + read,search,compare) userdn="ldap:///anyone";) +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +createTimestamp: 20150204165610Z +modifyTimestamp: 20150204165610Z + +dn: cn=config +cn: config +objectClass: top +objectClass: extensibleObject +objectClass: nsslapdConfig +nsslapd-schemadir: /etc/dirsrv/slapd-localhost/schema +nsslapd-lockdir: /var/lock/dirsrv/slapd-localhost +nsslapd-tmpdir: /tmp +nsslapd-certdir: /etc/dirsrv/slapd-localhost +nsslapd-ldifdir: /var/lib/dirsrv/slapd-localhost/ldif +nsslapd-bakdir: /var/lib/dirsrv/slapd-localhost/bak +nsslapd-rundir: /var/run/dirsrv +nsslapd-instancedir: /usr/lib64/dirsrv/slapd-localhost +nsslapd-accesslog-logging-enabled: on +nsslapd-accesslog-maxlogsperdir: 10 +nsslapd-accesslog-mode: 600 +nsslapd-accesslog-maxlogsize: 100 +nsslapd-accesslog-logrotationtime: 1 +nsslapd-accesslog-logrotationtimeunit: day +nsslapd-accesslog-logrotationsync-enabled: off +nsslapd-accesslog-logrotationsynchour: 0 +nsslapd-accesslog-logrotationsyncmin: 0 +nsslapd-accesslog: /var/log/dirsrv/slapd-localhost/access +nsslapd-enquote-sup-oc: off +nsslapd-localhost: localhost.localdomain +nsslapd-schemacheck: on +nsslapd-syntaxcheck: on +nsslapd-dn-validate-strict: off +nsslapd-rewrite-rfc1274: off +nsslapd-return-exact-case: on +nsslapd-ssl-check-hostname: on +nsslapd-validate-cert: warn +nsslapd-allow-unauthenticated-binds: off +nsslapd-require-secure-binds: off +nsslapd-allow-anonymous####-access: on +nsslapd-localssf: 71 +nsslapd-minssf: 0 +nsslapd-port: 389 +nsslapd-localuser: nobody +nsslapd-errorlog-logging-enabled: on +nsslapd-errorlog-mode: 600 +nsslapd-errorlog-maxlogsperdir: 2 +nsslapd-errorlog-maxlogsize: 100 +nsslapd-errorlog-logrotationtime: 1 +nsslapd-errorlog-logrotationtimeunit: week +nsslapd-errorlog-logrotationsync-enabled: off +nsslapd-errorlog-logrotationsynchour: 0 +nsslapd-errorlog-logrotationsyncmin: 0 +nsslapd-errorlog: /var/log/dirsrv/slapd-localhost/errors +nsslapd-auditlog: /var/log/dirsrv/slapd-localhost/audit +nsslapd-auditlog-mode: 600 +nsslapd-auditlog-maxlogsize: 100 +nsslapd-auditlog-logrotationtime: 1 +nsslapd-auditlog-logrotationtimeunit: day +nsslapd-rootdn: cn=dm +nsslapd-maxdescriptors: 1024 +nsslapd-max-filter-nest-level: 40 +nsslapd-ndn-cache-enabled: on +nsslapd-sasl-mapping-fallback: off +nsslapd-dynamic-plugins: off +nsslapd-allow-hashed-passwords: off +nsslapd-ldapifilepath: /var/run/slapd-localhost.socket +nsslapd-ldapilisten: off +nsslapd-ldapiautobind: off +nsslapd-ldapimaprootdn: cn=dm +nsslapd-ldapimaptoentries: off +nsslapd-ldapiuidnumbertype: uidNumber +nsslapd-ldapigidnumbertype: gidNumber +nsslapd-ldapientrysearchbase: dc=example,dc=com +nsslapd-defaultnamingcontext: dc=example,dc=com +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) 
userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l + dap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Group,cn=localhos + t.localdomain,ou=example.com,o=NetscapeRoot";) +modifiersName: cn=dm +modifyTimestamp: 20150205195242Z +nsslapd-auditlog-logging-enabled: on +nsslapd-auditlog-logging-hide-unhashed-pw: off +nsslapd-rootpw: {SSHA}AQH9bTYZW4kfkfyHg1k+lG88H2dFOuwakzFEpw== +numSubordinates: 10 + diff --git a/dirsrvtests/tests/data/ticket47953/ticket47953.ldif b/dirsrvtests/tests/data/ticket47953/ticket47953.ldif new file mode 100644 index 0000000..e59977e --- /dev/null +++ b/dirsrvtests/tests/data/ticket47953/ticket47953.ldif @@ -0,0 +1,27 @@ +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access"; + allow (read, search, compare) userdn="ldap:///anyone";) +aci: (targetattr="carLicense || description || displayName || facsimileTelepho + neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele + dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress || + postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr + ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber + || telexNumber || title || userCertificate || userPassword || userSMIMECertif + icate || x500UniqueIdentifier")(version 3.0; acl "Enable self write for commo + n attributes"; allow (write) userdn="ldap:///self";) +aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al + low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com" + );) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ + e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr + oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";) + diff --git a/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz b/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz new file mode 100644 index 0000000..2b309a0 Binary files /dev/null and b/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz differ diff --git a/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz b/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz new file mode 100644 index 0000000..84de0e9 Binary files /dev/null and b/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz differ diff --git a/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif b/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif new file mode 100644 index 0000000..50000f2 --- /dev/null +++ b/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif @@ -0,0 +1,17017 @@ +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) +aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl2"; allow(read, search, compare) userdn = "ldap:///anyone";) + +dn: ou=People,dc=example,dc=com +objectClass: top +objectClass: 
organizationalunit +ou: People + +dn: ou=Groups,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: Groups + +dn: cn=user0,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user0 +sn: user0 +uid: uid0 +givenname: givenname0 +description: description0 +userPassword: password0 +mail: uid0 +uidnumber: 0 +gidnumber: 0 +homeDirectory: /home/uid0 + +dn: cn=user1,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user1 +sn: user1 +uid: uid1 +givenname: givenname1 +description: description1 +userPassword: password1 +mail: uid1 +uidnumber: 1 +gidnumber: 1 +homeDirectory: /home/uid1 + +dn: cn=user2,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user2 +sn: user2 +uid: uid2 +givenname: givenname2 +description: description2 +userPassword: password2 +mail: uid2 +uidnumber: 2 +gidnumber: 2 +homeDirectory: /home/uid2 + +dn: cn=user3,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user3 +sn: user3 +uid: uid3 +givenname: givenname3 +description: description3 +userPassword: password3 +mail: uid3 +uidnumber: 3 +gidnumber: 3 +homeDirectory: /home/uid3 + +dn: cn=user4,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user4 +sn: user4 +uid: uid4 +givenname: givenname4 +description: description4 +userPassword: password4 +mail: uid4 +uidnumber: 4 +gidnumber: 4 +homeDirectory: /home/uid4 + +dn: cn=user5,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user5 +sn: user5 +uid: uid5 +givenname: givenname5 +description: description5 +userPassword: password5 +mail: uid5 +uidnumber: 5 +gidnumber: 5 +homeDirectory: /home/uid5 + +dn: cn=user6,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user6 +sn: user6 +uid: uid6 +givenname: givenname6 +description: description6 +userPassword: password6 +mail: uid6 +uidnumber: 6 +gidnumber: 6 +homeDirectory: /home/uid6 + +dn: cn=user7,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user7 +sn: user7 +uid: uid7 +givenname: givenname7 +description: description7 +userPassword: password7 +mail: uid7 +uidnumber: 7 +gidnumber: 7 +homeDirectory: /home/uid7 + +dn: cn=user8,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user8 +sn: user8 +uid: uid8 +givenname: givenname8 +description: description8 +userPassword: password8 +mail: uid8 +uidnumber: 8 +gidnumber: 8 +homeDirectory: /home/uid8 + +dn: cn=user9,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user9 +sn: user9 +uid: uid9 +givenname: givenname9 +description: description9 +userPassword: password9 +mail: uid9 
+uidnumber: 9 +gidnumber: 9 +homeDirectory: /home/uid9 + +dn: cn=user10,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user10 +sn: user10 +uid: uid10 +givenname: givenname10 +description: description10 +userPassword: password10 +mail: uid10 +uidnumber: 10 +gidnumber: 10 +homeDirectory: /home/uid10 + +dn: cn=user11,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user11 +sn: user11 +uid: uid11 +givenname: givenname11 +description: description11 +userPassword: password11 +mail: uid11 +uidnumber: 11 +gidnumber: 11 +homeDirectory: /home/uid11 + +dn: cn=user12,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user12 +sn: user12 +uid: uid12 +givenname: givenname12 +description: description12 +userPassword: password12 +mail: uid12 +uidnumber: 12 +gidnumber: 12 +homeDirectory: /home/uid12 + +dn: cn=user13,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user13 +sn: user13 +uid: uid13 +givenname: givenname13 +description: description13 +userPassword: password13 +mail: uid13 +uidnumber: 13 +gidnumber: 13 +homeDirectory: /home/uid13 + +dn: cn=user14,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user14 +sn: user14 +uid: uid14 +givenname: givenname14 +description: description14 +userPassword: password14 +mail: uid14 +uidnumber: 14 +gidnumber: 14 +homeDirectory: /home/uid14 + +dn: cn=user15,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user15 +sn: user15 +uid: uid15 +givenname: givenname15 +description: description15 +userPassword: password15 +mail: uid15 +uidnumber: 15 +gidnumber: 15 +homeDirectory: /home/uid15 + +dn: cn=user16,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user16 +sn: user16 +uid: uid16 +givenname: givenname16 +description: description16 +userPassword: password16 +mail: uid16 +uidnumber: 16 +gidnumber: 16 +homeDirectory: /home/uid16 + +dn: cn=user17,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user17 +sn: user17 +uid: uid17 +givenname: givenname17 +description: description17 +userPassword: password17 +mail: uid17 +uidnumber: 17 +gidnumber: 17 +homeDirectory: /home/uid17 + +dn: cn=user18,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user18 +sn: user18 +uid: uid18 +givenname: givenname18 +description: description18 +userPassword: password18 +mail: uid18 +uidnumber: 18 +gidnumber: 18 +homeDirectory: /home/uid18 + +dn: cn=user19,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user19 +sn: user19 +uid: uid19 +givenname: givenname19 +description: description19 
+userPassword: password19 +mail: uid19 +uidnumber: 19 +gidnumber: 19 +homeDirectory: /home/uid19 + +dn: cn=user20,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user20 +sn: user20 +uid: uid20 +givenname: givenname20 +description: description20 +userPassword: password20 +mail: uid20 +uidnumber: 20 +gidnumber: 20 +homeDirectory: /home/uid20 + +dn: cn=user21,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user21 +sn: user21 +uid: uid21 +givenname: givenname21 +description: description21 +userPassword: password21 +mail: uid21 +uidnumber: 21 +gidnumber: 21 +homeDirectory: /home/uid21 + +dn: cn=user22,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user22 +sn: user22 +uid: uid22 +givenname: givenname22 +description: description22 +userPassword: password22 +mail: uid22 +uidnumber: 22 +gidnumber: 22 +homeDirectory: /home/uid22 + +dn: cn=user23,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user23 +sn: user23 +uid: uid23 +givenname: givenname23 +description: description23 +userPassword: password23 +mail: uid23 +uidnumber: 23 +gidnumber: 23 +homeDirectory: /home/uid23 + +dn: cn=user24,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user24 +sn: user24 +uid: uid24 +givenname: givenname24 +description: description24 +userPassword: password24 +mail: uid24 +uidnumber: 24 +gidnumber: 24 +homeDirectory: /home/uid24 + +dn: cn=user25,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user25 +sn: user25 +uid: uid25 +givenname: givenname25 +description: description25 +userPassword: password25 +mail: uid25 +uidnumber: 25 +gidnumber: 25 +homeDirectory: /home/uid25 + +dn: cn=user26,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user26 +sn: user26 +uid: uid26 +givenname: givenname26 +description: description26 +userPassword: password26 +mail: uid26 +uidnumber: 26 +gidnumber: 26 +homeDirectory: /home/uid26 + +dn: cn=user27,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user27 +sn: user27 +uid: uid27 +givenname: givenname27 +description: description27 +userPassword: password27 +mail: uid27 +uidnumber: 27 +gidnumber: 27 +homeDirectory: /home/uid27 + +dn: cn=user28,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user28 +sn: user28 +uid: uid28 +givenname: givenname28 +description: description28 +userPassword: password28 +mail: uid28 +uidnumber: 28 +gidnumber: 28 +homeDirectory: /home/uid28 + +dn: cn=user29,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user29 +sn: user29 +uid: uid29 +givenname: 
givenname29 +description: description29 +userPassword: password29 +mail: uid29 +uidnumber: 29 +gidnumber: 29 +homeDirectory: /home/uid29 + +dn: cn=user30,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user30 +sn: user30 +uid: uid30 +givenname: givenname30 +description: description30 +userPassword: password30 +mail: uid30 +uidnumber: 30 +gidnumber: 30 +homeDirectory: /home/uid30 + +dn: cn=user31,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user31 +sn: user31 +uid: uid31 +givenname: givenname31 +description: description31 +userPassword: password31 +mail: uid31 +uidnumber: 31 +gidnumber: 31 +homeDirectory: /home/uid31 + +dn: cn=user32,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user32 +sn: user32 +uid: uid32 +givenname: givenname32 +description: description32 +userPassword: password32 +mail: uid32 +uidnumber: 32 +gidnumber: 32 +homeDirectory: /home/uid32 + +dn: cn=user33,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user33 +sn: user33 +uid: uid33 +givenname: givenname33 +description: description33 +userPassword: password33 +mail: uid33 +uidnumber: 33 +gidnumber: 33 +homeDirectory: /home/uid33 + +dn: cn=user34,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user34 +sn: user34 +uid: uid34 +givenname: givenname34 +description: description34 +userPassword: password34 +mail: uid34 +uidnumber: 34 +gidnumber: 34 +homeDirectory: /home/uid34 + +dn: cn=user35,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user35 +sn: user35 +uid: uid35 +givenname: givenname35 +description: description35 +userPassword: password35 +mail: uid35 +uidnumber: 35 +gidnumber: 35 +homeDirectory: /home/uid35 + +dn: cn=user36,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user36 +sn: user36 +uid: uid36 +givenname: givenname36 +description: description36 +userPassword: password36 +mail: uid36 +uidnumber: 36 +gidnumber: 36 +homeDirectory: /home/uid36 + +dn: cn=user37,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user37 +sn: user37 +uid: uid37 +givenname: givenname37 +description: description37 +userPassword: password37 +mail: uid37 +uidnumber: 37 +gidnumber: 37 +homeDirectory: /home/uid37 + +dn: cn=user38,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user38 +sn: user38 +uid: uid38 +givenname: givenname38 +description: description38 +userPassword: password38 +mail: uid38 +uidnumber: 38 +gidnumber: 38 +homeDirectory: /home/uid38 + +dn: cn=user39,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user39 
+sn: user39 +uid: uid39 +givenname: givenname39 +description: description39 +userPassword: password39 +mail: uid39 +uidnumber: 39 +gidnumber: 39 +homeDirectory: /home/uid39 + +dn: cn=user40,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user40 +sn: user40 +uid: uid40 +givenname: givenname40 +description: description40 +userPassword: password40 +mail: uid40 +uidnumber: 40 +gidnumber: 40 +homeDirectory: /home/uid40 + +dn: cn=user41,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user41 +sn: user41 +uid: uid41 +givenname: givenname41 +description: description41 +userPassword: password41 +mail: uid41 +uidnumber: 41 +gidnumber: 41 +homeDirectory: /home/uid41 + +dn: cn=user42,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user42 +sn: user42 +uid: uid42 +givenname: givenname42 +description: description42 +userPassword: password42 +mail: uid42 +uidnumber: 42 +gidnumber: 42 +homeDirectory: /home/uid42 + +dn: cn=user43,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user43 +sn: user43 +uid: uid43 +givenname: givenname43 +description: description43 +userPassword: password43 +mail: uid43 +uidnumber: 43 +gidnumber: 43 +homeDirectory: /home/uid43 + +dn: cn=user44,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user44 +sn: user44 +uid: uid44 +givenname: givenname44 +description: description44 +userPassword: password44 +mail: uid44 +uidnumber: 44 +gidnumber: 44 +homeDirectory: /home/uid44 + +dn: cn=user45,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user45 +sn: user45 +uid: uid45 +givenname: givenname45 +description: description45 +userPassword: password45 +mail: uid45 +uidnumber: 45 +gidnumber: 45 +homeDirectory: /home/uid45 + +dn: cn=user46,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user46 +sn: user46 +uid: uid46 +givenname: givenname46 +description: description46 +userPassword: password46 +mail: uid46 +uidnumber: 46 +gidnumber: 46 +homeDirectory: /home/uid46 + +dn: cn=user47,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user47 +sn: user47 +uid: uid47 +givenname: givenname47 +description: description47 +userPassword: password47 +mail: uid47 +uidnumber: 47 +gidnumber: 47 +homeDirectory: /home/uid47 + +dn: cn=user48,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user48 +sn: user48 +uid: uid48 +givenname: givenname48 +description: description48 +userPassword: password48 +mail: uid48 +uidnumber: 48 +gidnumber: 48 +homeDirectory: /home/uid48 + +dn: cn=user49,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user49 +sn: user49 +uid: uid49 +givenname: givenname49 +description: description49 +userPassword: password49 +mail: uid49 +uidnumber: 49 +gidnumber: 49 +homeDirectory: /home/uid49 + +dn: cn=user50,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user50 +sn: user50 +uid: uid50 +givenname: givenname50 +description: description50 +userPassword: password50 +mail: uid50 +uidnumber: 50 +gidnumber: 50 +homeDirectory: /home/uid50 + +dn: cn=user51,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user51 +sn: user51 +uid: uid51 +givenname: givenname51 +description: description51 +userPassword: password51 +mail: uid51 +uidnumber: 51 +gidnumber: 51 +homeDirectory: /home/uid51 + +dn: cn=user52,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user52 +sn: user52 +uid: uid52 +givenname: givenname52 +description: description52 +userPassword: password52 +mail: uid52 +uidnumber: 52 +gidnumber: 52 +homeDirectory: /home/uid52 + +dn: cn=user53,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user53 +sn: user53 +uid: uid53 +givenname: givenname53 +description: description53 +userPassword: password53 +mail: uid53 +uidnumber: 53 +gidnumber: 53 +homeDirectory: /home/uid53 + +dn: cn=user54,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user54 +sn: user54 +uid: uid54 +givenname: givenname54 +description: description54 +userPassword: password54 +mail: uid54 +uidnumber: 54 +gidnumber: 54 +homeDirectory: /home/uid54 + +dn: cn=user55,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user55 +sn: user55 +uid: uid55 +givenname: givenname55 +description: description55 +userPassword: password55 +mail: uid55 +uidnumber: 55 +gidnumber: 55 +homeDirectory: /home/uid55 + +dn: cn=user56,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user56 +sn: user56 +uid: uid56 +givenname: givenname56 +description: description56 +userPassword: password56 +mail: uid56 +uidnumber: 56 +gidnumber: 56 +homeDirectory: /home/uid56 + +dn: cn=user57,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user57 +sn: user57 +uid: uid57 +givenname: givenname57 +description: description57 +userPassword: password57 +mail: uid57 +uidnumber: 57 +gidnumber: 57 +homeDirectory: /home/uid57 + +dn: cn=user58,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user58 +sn: user58 +uid: uid58 +givenname: givenname58 +description: description58 +userPassword: password58 +mail: uid58 +uidnumber: 58 +gidnumber: 58 +homeDirectory: /home/uid58 + +dn: cn=user59,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: 
organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user59 +sn: user59 +uid: uid59 +givenname: givenname59 +description: description59 +userPassword: password59 +mail: uid59 +uidnumber: 59 +gidnumber: 59 +homeDirectory: /home/uid59 + +dn: cn=user60,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user60 +sn: user60 +uid: uid60 +givenname: givenname60 +description: description60 +userPassword: password60 +mail: uid60 +uidnumber: 60 +gidnumber: 60 +homeDirectory: /home/uid60 + +dn: cn=user61,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user61 +sn: user61 +uid: uid61 +givenname: givenname61 +description: description61 +userPassword: password61 +mail: uid61 +uidnumber: 61 +gidnumber: 61 +homeDirectory: /home/uid61 + +dn: cn=user62,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user62 +sn: user62 +uid: uid62 +givenname: givenname62 +description: description62 +userPassword: password62 +mail: uid62 +uidnumber: 62 +gidnumber: 62 +homeDirectory: /home/uid62 + +dn: cn=user63,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user63 +sn: user63 +uid: uid63 +givenname: givenname63 +description: description63 +userPassword: password63 +mail: uid63 +uidnumber: 63 +gidnumber: 63 +homeDirectory: /home/uid63 + +dn: cn=user64,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user64 +sn: user64 +uid: uid64 +givenname: givenname64 +description: description64 +userPassword: password64 +mail: uid64 +uidnumber: 64 +gidnumber: 64 +homeDirectory: /home/uid64 + +dn: cn=user65,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user65 +sn: user65 +uid: uid65 +givenname: givenname65 +description: description65 +userPassword: password65 +mail: uid65 +uidnumber: 65 +gidnumber: 65 +homeDirectory: /home/uid65 + +dn: cn=user66,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user66 +sn: user66 +uid: uid66 +givenname: givenname66 +description: description66 +userPassword: password66 +mail: uid66 +uidnumber: 66 +gidnumber: 66 +homeDirectory: /home/uid66 + +dn: cn=user67,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user67 +sn: user67 +uid: uid67 +givenname: givenname67 +description: description67 +userPassword: password67 +mail: uid67 +uidnumber: 67 +gidnumber: 67 +homeDirectory: /home/uid67 + +dn: cn=user68,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user68 +sn: user68 +uid: uid68 +givenname: givenname68 +description: description68 +userPassword: password68 +mail: uid68 +uidnumber: 68 +gidnumber: 68 +homeDirectory: /home/uid68 + +dn: cn=user69,ou=People,dc=example,dc=com +objectClass: top 
+objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user69 +sn: user69 +uid: uid69 +givenname: givenname69 +description: description69 +userPassword: password69 +mail: uid69 +uidnumber: 69 +gidnumber: 69 +homeDirectory: /home/uid69 + +dn: cn=user70,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user70 +sn: user70 +uid: uid70 +givenname: givenname70 +description: description70 +userPassword: password70 +mail: uid70 +uidnumber: 70 +gidnumber: 70 +homeDirectory: /home/uid70 + +dn: cn=user71,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user71 +sn: user71 +uid: uid71 +givenname: givenname71 +description: description71 +userPassword: password71 +mail: uid71 +uidnumber: 71 +gidnumber: 71 +homeDirectory: /home/uid71 + +dn: cn=user72,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user72 +sn: user72 +uid: uid72 +givenname: givenname72 +description: description72 +userPassword: password72 +mail: uid72 +uidnumber: 72 +gidnumber: 72 +homeDirectory: /home/uid72 + +dn: cn=user73,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user73 +sn: user73 +uid: uid73 +givenname: givenname73 +description: description73 +userPassword: password73 +mail: uid73 +uidnumber: 73 +gidnumber: 73 +homeDirectory: /home/uid73 + +dn: cn=user74,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user74 +sn: user74 +uid: uid74 +givenname: givenname74 +description: description74 +userPassword: password74 +mail: uid74 +uidnumber: 74 +gidnumber: 74 +homeDirectory: /home/uid74 + +dn: cn=user75,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user75 +sn: user75 +uid: uid75 +givenname: givenname75 +description: description75 +userPassword: password75 +mail: uid75 +uidnumber: 75 +gidnumber: 75 +homeDirectory: /home/uid75 + +dn: cn=user76,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user76 +sn: user76 +uid: uid76 +givenname: givenname76 +description: description76 +userPassword: password76 +mail: uid76 +uidnumber: 76 +gidnumber: 76 +homeDirectory: /home/uid76 + +dn: cn=user77,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user77 +sn: user77 +uid: uid77 +givenname: givenname77 +description: description77 +userPassword: password77 +mail: uid77 +uidnumber: 77 +gidnumber: 77 +homeDirectory: /home/uid77 + +dn: cn=user78,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user78 +sn: user78 +uid: uid78 +givenname: givenname78 +description: description78 +userPassword: password78 +mail: uid78 +uidnumber: 78 +gidnumber: 78 +homeDirectory: /home/uid78 + +dn: 
cn=user79,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user79 +sn: user79 +uid: uid79 +givenname: givenname79 +description: description79 +userPassword: password79 +mail: uid79 +uidnumber: 79 +gidnumber: 79 +homeDirectory: /home/uid79 + +dn: cn=user80,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user80 +sn: user80 +uid: uid80 +givenname: givenname80 +description: description80 +userPassword: password80 +mail: uid80 +uidnumber: 80 +gidnumber: 80 +homeDirectory: /home/uid80 + +dn: cn=user81,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user81 +sn: user81 +uid: uid81 +givenname: givenname81 +description: description81 +userPassword: password81 +mail: uid81 +uidnumber: 81 +gidnumber: 81 +homeDirectory: /home/uid81 + +dn: cn=user82,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user82 +sn: user82 +uid: uid82 +givenname: givenname82 +description: description82 +userPassword: password82 +mail: uid82 +uidnumber: 82 +gidnumber: 82 +homeDirectory: /home/uid82 + +dn: cn=user83,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user83 +sn: user83 +uid: uid83 +givenname: givenname83 +description: description83 +userPassword: password83 +mail: uid83 +uidnumber: 83 +gidnumber: 83 +homeDirectory: /home/uid83 + +dn: cn=user84,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user84 +sn: user84 +uid: uid84 +givenname: givenname84 +description: description84 +userPassword: password84 +mail: uid84 +uidnumber: 84 +gidnumber: 84 +homeDirectory: /home/uid84 + +dn: cn=user85,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user85 +sn: user85 +uid: uid85 +givenname: givenname85 +description: description85 +userPassword: password85 +mail: uid85 +uidnumber: 85 +gidnumber: 85 +homeDirectory: /home/uid85 + +dn: cn=user86,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user86 +sn: user86 +uid: uid86 +givenname: givenname86 +description: description86 +userPassword: password86 +mail: uid86 +uidnumber: 86 +gidnumber: 86 +homeDirectory: /home/uid86 + +dn: cn=user87,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user87 +sn: user87 +uid: uid87 +givenname: givenname87 +description: description87 +userPassword: password87 +mail: uid87 +uidnumber: 87 +gidnumber: 87 +homeDirectory: /home/uid87 + +dn: cn=user88,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user88 +sn: user88 +uid: uid88 +givenname: givenname88 +description: description88 +userPassword: password88 +mail: uid88 +uidnumber: 88 +gidnumber: 88 
+homeDirectory: /home/uid88 + +dn: cn=user89,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user89 +sn: user89 +uid: uid89 +givenname: givenname89 +description: description89 +userPassword: password89 +mail: uid89 +uidnumber: 89 +gidnumber: 89 +homeDirectory: /home/uid89 + +dn: cn=user90,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user90 +sn: user90 +uid: uid90 +givenname: givenname90 +description: description90 +userPassword: password90 +mail: uid90 +uidnumber: 90 +gidnumber: 90 +homeDirectory: /home/uid90 + +dn: cn=user91,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user91 +sn: user91 +uid: uid91 +givenname: givenname91 +description: description91 +userPassword: password91 +mail: uid91 +uidnumber: 91 +gidnumber: 91 +homeDirectory: /home/uid91 + +dn: cn=user92,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user92 +sn: user92 +uid: uid92 +givenname: givenname92 +description: description92 +userPassword: password92 +mail: uid92 +uidnumber: 92 +gidnumber: 92 +homeDirectory: /home/uid92 + +dn: cn=user93,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user93 +sn: user93 +uid: uid93 +givenname: givenname93 +description: description93 +userPassword: password93 +mail: uid93 +uidnumber: 93 +gidnumber: 93 +homeDirectory: /home/uid93 + +dn: cn=user94,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user94 +sn: user94 +uid: uid94 +givenname: givenname94 +description: description94 +userPassword: password94 +mail: uid94 +uidnumber: 94 +gidnumber: 94 +homeDirectory: /home/uid94 + +dn: cn=user95,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user95 +sn: user95 +uid: uid95 +givenname: givenname95 +description: description95 +userPassword: password95 +mail: uid95 +uidnumber: 95 +gidnumber: 95 +homeDirectory: /home/uid95 + +dn: cn=user96,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user96 +sn: user96 +uid: uid96 +givenname: givenname96 +description: description96 +userPassword: password96 +mail: uid96 +uidnumber: 96 +gidnumber: 96 +homeDirectory: /home/uid96 + +dn: cn=user97,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user97 +sn: user97 +uid: uid97 +givenname: givenname97 +description: description97 +userPassword: password97 +mail: uid97 +uidnumber: 97 +gidnumber: 97 +homeDirectory: /home/uid97 + +dn: cn=user98,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user98 +sn: user98 +uid: uid98 +givenname: givenname98 +description: description98 +userPassword: password98 +mail: 
uid98 +uidnumber: 98 +gidnumber: 98 +homeDirectory: /home/uid98 + +dn: cn=user99,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user99 +sn: user99 +uid: uid99 +givenname: givenname99 +description: description99 +userPassword: password99 +mail: uid99 +uidnumber: 99 +gidnumber: 99 +homeDirectory: /home/uid99 + +dn: cn=user100,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user100 +sn: user100 +uid: uid100 +givenname: givenname100 +description: description100 +userPassword: password100 +mail: uid100 +uidnumber: 100 +gidnumber: 100 +homeDirectory: /home/uid100 + +dn: cn=user101,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user101 +sn: user101 +uid: uid101 +givenname: givenname101 +description: description101 +userPassword: password101 +mail: uid101 +uidnumber: 101 +gidnumber: 101 +homeDirectory: /home/uid101 + +dn: cn=user102,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user102 +sn: user102 +uid: uid102 +givenname: givenname102 +description: description102 +userPassword: password102 +mail: uid102 +uidnumber: 102 +gidnumber: 102 +homeDirectory: /home/uid102 + +dn: cn=user103,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user103 +sn: user103 +uid: uid103 +givenname: givenname103 +description: description103 +userPassword: password103 +mail: uid103 +uidnumber: 103 +gidnumber: 103 +homeDirectory: /home/uid103 + +dn: cn=user104,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user104 +sn: user104 +uid: uid104 +givenname: givenname104 +description: description104 +userPassword: password104 +mail: uid104 +uidnumber: 104 +gidnumber: 104 +homeDirectory: /home/uid104 + +dn: cn=user105,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user105 +sn: user105 +uid: uid105 +givenname: givenname105 +description: description105 +userPassword: password105 +mail: uid105 +uidnumber: 105 +gidnumber: 105 +homeDirectory: /home/uid105 + +dn: cn=user106,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user106 +sn: user106 +uid: uid106 +givenname: givenname106 +description: description106 +userPassword: password106 +mail: uid106 +uidnumber: 106 +gidnumber: 106 +homeDirectory: /home/uid106 + +dn: cn=user107,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user107 +sn: user107 +uid: uid107 +givenname: givenname107 +description: description107 +userPassword: password107 +mail: uid107 +uidnumber: 107 +gidnumber: 107 +homeDirectory: /home/uid107 + +dn: cn=user108,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: 
posixAccount +cn: user108 +sn: user108 +uid: uid108 +givenname: givenname108 +description: description108 +userPassword: password108 +mail: uid108 +uidnumber: 108 +gidnumber: 108 +homeDirectory: /home/uid108 + +dn: cn=user109,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user109 +sn: user109 +uid: uid109 +givenname: givenname109 +description: description109 +userPassword: password109 +mail: uid109 +uidnumber: 109 +gidnumber: 109 +homeDirectory: /home/uid109 + +dn: cn=user110,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user110 +sn: user110 +uid: uid110 +givenname: givenname110 +description: description110 +userPassword: password110 +mail: uid110 +uidnumber: 110 +gidnumber: 110 +homeDirectory: /home/uid110 + +dn: cn=user111,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user111 +sn: user111 +uid: uid111 +givenname: givenname111 +description: description111 +userPassword: password111 +mail: uid111 +uidnumber: 111 +gidnumber: 111 +homeDirectory: /home/uid111 + +dn: cn=user112,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user112 +sn: user112 +uid: uid112 +givenname: givenname112 +description: description112 +userPassword: password112 +mail: uid112 +uidnumber: 112 +gidnumber: 112 +homeDirectory: /home/uid112 + +dn: cn=user113,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user113 +sn: user113 +uid: uid113 +givenname: givenname113 +description: description113 +userPassword: password113 +mail: uid113 +uidnumber: 113 +gidnumber: 113 +homeDirectory: /home/uid113 + +dn: cn=user114,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user114 +sn: user114 +uid: uid114 +givenname: givenname114 +description: description114 +userPassword: password114 +mail: uid114 +uidnumber: 114 +gidnumber: 114 +homeDirectory: /home/uid114 + +dn: cn=user115,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user115 +sn: user115 +uid: uid115 +givenname: givenname115 +description: description115 +userPassword: password115 +mail: uid115 +uidnumber: 115 +gidnumber: 115 +homeDirectory: /home/uid115 + +dn: cn=user116,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user116 +sn: user116 +uid: uid116 +givenname: givenname116 +description: description116 +userPassword: password116 +mail: uid116 +uidnumber: 116 +gidnumber: 116 +homeDirectory: /home/uid116 + +dn: cn=user117,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user117 +sn: user117 +uid: uid117 +givenname: givenname117 +description: description117 +userPassword: password117 +mail: uid117 +uidnumber: 117 +gidnumber: 117 +homeDirectory: /home/uid117 + +dn: 
cn=user118,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user118 +sn: user118 +uid: uid118 +givenname: givenname118 +description: description118 +userPassword: password118 +mail: uid118 +uidnumber: 118 +gidnumber: 118 +homeDirectory: /home/uid118 + +dn: cn=user119,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user119 +sn: user119 +uid: uid119 +givenname: givenname119 +description: description119 +userPassword: password119 +mail: uid119 +uidnumber: 119 +gidnumber: 119 +homeDirectory: /home/uid119 + +dn: cn=user120,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user120 +sn: user120 +uid: uid120 +givenname: givenname120 +description: description120 +userPassword: password120 +mail: uid120 +uidnumber: 120 +gidnumber: 120 +homeDirectory: /home/uid120 + +dn: cn=user121,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user121 +sn: user121 +uid: uid121 +givenname: givenname121 +description: description121 +userPassword: password121 +mail: uid121 +uidnumber: 121 +gidnumber: 121 +homeDirectory: /home/uid121 + +dn: cn=user122,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user122 +sn: user122 +uid: uid122 +givenname: givenname122 +description: description122 +userPassword: password122 +mail: uid122 +uidnumber: 122 +gidnumber: 122 +homeDirectory: /home/uid122 + +dn: cn=user123,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user123 +sn: user123 +uid: uid123 +givenname: givenname123 +description: description123 +userPassword: password123 +mail: uid123 +uidnumber: 123 +gidnumber: 123 +homeDirectory: /home/uid123 + +dn: cn=user124,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user124 +sn: user124 +uid: uid124 +givenname: givenname124 +description: description124 +userPassword: password124 +mail: uid124 +uidnumber: 124 +gidnumber: 124 +homeDirectory: /home/uid124 + +dn: cn=user125,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user125 +sn: user125 +uid: uid125 +givenname: givenname125 +description: description125 +userPassword: password125 +mail: uid125 +uidnumber: 125 +gidnumber: 125 +homeDirectory: /home/uid125 + +dn: cn=user126,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user126 +sn: user126 +uid: uid126 +givenname: givenname126 +description: description126 +userPassword: password126 +mail: uid126 +uidnumber: 126 +gidnumber: 126 +homeDirectory: /home/uid126 + +dn: cn=user127,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user127 +sn: user127 +uid: uid127 +givenname: 
givenname127 +description: description127 +userPassword: password127 +mail: uid127 +uidnumber: 127 +gidnumber: 127 +homeDirectory: /home/uid127 + +dn: cn=user128,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user128 +sn: user128 +uid: uid128 +givenname: givenname128 +description: description128 +userPassword: password128 +mail: uid128 +uidnumber: 128 +gidnumber: 128 +homeDirectory: /home/uid128 + +dn: cn=user129,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user129 +sn: user129 +uid: uid129 +givenname: givenname129 +description: description129 +userPassword: password129 +mail: uid129 +uidnumber: 129 +gidnumber: 129 +homeDirectory: /home/uid129 + +dn: cn=user130,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user130 +sn: user130 +uid: uid130 +givenname: givenname130 +description: description130 +userPassword: password130 +mail: uid130 +uidnumber: 130 +gidnumber: 130 +homeDirectory: /home/uid130 + +dn: cn=user131,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user131 +sn: user131 +uid: uid131 +givenname: givenname131 +description: description131 +userPassword: password131 +mail: uid131 +uidnumber: 131 +gidnumber: 131 +homeDirectory: /home/uid131 + +dn: cn=user132,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user132 +sn: user132 +uid: uid132 +givenname: givenname132 +description: description132 +userPassword: password132 +mail: uid132 +uidnumber: 132 +gidnumber: 132 +homeDirectory: /home/uid132 + +dn: cn=user133,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user133 +sn: user133 +uid: uid133 +givenname: givenname133 +description: description133 +userPassword: password133 +mail: uid133 +uidnumber: 133 +gidnumber: 133 +homeDirectory: /home/uid133 + +dn: cn=user134,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user134 +sn: user134 +uid: uid134 +givenname: givenname134 +description: description134 +userPassword: password134 +mail: uid134 +uidnumber: 134 +gidnumber: 134 +homeDirectory: /home/uid134 + +dn: cn=user135,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user135 +sn: user135 +uid: uid135 +givenname: givenname135 +description: description135 +userPassword: password135 +mail: uid135 +uidnumber: 135 +gidnumber: 135 +homeDirectory: /home/uid135 + +dn: cn=user136,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user136 +sn: user136 +uid: uid136 +givenname: givenname136 +description: description136 +userPassword: password136 +mail: uid136 +uidnumber: 136 +gidnumber: 136 +homeDirectory: /home/uid136 + +dn: cn=user137,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user137 +sn: user137 +uid: uid137 +givenname: givenname137 +description: description137 +userPassword: password137 +mail: uid137 +uidnumber: 137 +gidnumber: 137 +homeDirectory: /home/uid137 + +dn: cn=user138,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user138 +sn: user138 +uid: uid138 +givenname: givenname138 +description: description138 +userPassword: password138 +mail: uid138 +uidnumber: 138 +gidnumber: 138 +homeDirectory: /home/uid138 + +dn: cn=user139,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user139 +sn: user139 +uid: uid139 +givenname: givenname139 +description: description139 +userPassword: password139 +mail: uid139 +uidnumber: 139 +gidnumber: 139 +homeDirectory: /home/uid139 + +dn: cn=user140,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user140 +sn: user140 +uid: uid140 +givenname: givenname140 +description: description140 +userPassword: password140 +mail: uid140 +uidnumber: 140 +gidnumber: 140 +homeDirectory: /home/uid140 + +dn: cn=user141,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user141 +sn: user141 +uid: uid141 +givenname: givenname141 +description: description141 +userPassword: password141 +mail: uid141 +uidnumber: 141 +gidnumber: 141 +homeDirectory: /home/uid141 + +dn: cn=user142,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user142 +sn: user142 +uid: uid142 +givenname: givenname142 +description: description142 +userPassword: password142 +mail: uid142 +uidnumber: 142 +gidnumber: 142 +homeDirectory: /home/uid142 + +dn: cn=user143,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user143 +sn: user143 +uid: uid143 +givenname: givenname143 +description: description143 +userPassword: password143 +mail: uid143 +uidnumber: 143 +gidnumber: 143 +homeDirectory: /home/uid143 + +dn: cn=user144,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user144 +sn: user144 +uid: uid144 +givenname: givenname144 +description: description144 +userPassword: password144 +mail: uid144 +uidnumber: 144 +gidnumber: 144 +homeDirectory: /home/uid144 + +dn: cn=user145,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user145 +sn: user145 +uid: uid145 +givenname: givenname145 +description: description145 +userPassword: password145 +mail: uid145 +uidnumber: 145 +gidnumber: 145 +homeDirectory: /home/uid145 + +dn: cn=user146,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user146 +sn: user146 +uid: uid146 +givenname: givenname146 +description: description146 +userPassword: password146 +mail: 
uid146 +uidnumber: 146 +gidnumber: 146 +homeDirectory: /home/uid146 + +dn: cn=user147,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user147 +sn: user147 +uid: uid147 +givenname: givenname147 +description: description147 +userPassword: password147 +mail: uid147 +uidnumber: 147 +gidnumber: 147 +homeDirectory: /home/uid147 + +dn: cn=user148,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user148 +sn: user148 +uid: uid148 +givenname: givenname148 +description: description148 +userPassword: password148 +mail: uid148 +uidnumber: 148 +gidnumber: 148 +homeDirectory: /home/uid148 + +dn: cn=user149,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user149 +sn: user149 +uid: uid149 +givenname: givenname149 +description: description149 +userPassword: password149 +mail: uid149 +uidnumber: 149 +gidnumber: 149 +homeDirectory: /home/uid149 + +dn: cn=user150,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user150 +sn: user150 +uid: uid150 +givenname: givenname150 +description: description150 +userPassword: password150 +mail: uid150 +uidnumber: 150 +gidnumber: 150 +homeDirectory: /home/uid150 + +dn: cn=user151,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user151 +sn: user151 +uid: uid151 +givenname: givenname151 +description: description151 +userPassword: password151 +mail: uid151 +uidnumber: 151 +gidnumber: 151 +homeDirectory: /home/uid151 + +dn: cn=user152,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user152 +sn: user152 +uid: uid152 +givenname: givenname152 +description: description152 +userPassword: password152 +mail: uid152 +uidnumber: 152 +gidnumber: 152 +homeDirectory: /home/uid152 + +dn: cn=user153,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user153 +sn: user153 +uid: uid153 +givenname: givenname153 +description: description153 +userPassword: password153 +mail: uid153 +uidnumber: 153 +gidnumber: 153 +homeDirectory: /home/uid153 + +dn: cn=user154,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user154 +sn: user154 +uid: uid154 +givenname: givenname154 +description: description154 +userPassword: password154 +mail: uid154 +uidnumber: 154 +gidnumber: 154 +homeDirectory: /home/uid154 + +dn: cn=user155,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user155 +sn: user155 +uid: uid155 +givenname: givenname155 +description: description155 +userPassword: password155 +mail: uid155 +uidnumber: 155 +gidnumber: 155 +homeDirectory: /home/uid155 + +dn: cn=user156,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user156 +sn: user156 +uid: uid156 +givenname: givenname156 +description: description156 +userPassword: password156 +mail: uid156 +uidnumber: 156 +gidnumber: 156 +homeDirectory: /home/uid156 + +dn: cn=user157,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user157 +sn: user157 +uid: uid157 +givenname: givenname157 +description: description157 +userPassword: password157 +mail: uid157 +uidnumber: 157 +gidnumber: 157 +homeDirectory: /home/uid157 + +dn: cn=user158,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user158 +sn: user158 +uid: uid158 +givenname: givenname158 +description: description158 +userPassword: password158 +mail: uid158 +uidnumber: 158 +gidnumber: 158 +homeDirectory: /home/uid158 + +dn: cn=user159,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user159 +sn: user159 +uid: uid159 +givenname: givenname159 +description: description159 +userPassword: password159 +mail: uid159 +uidnumber: 159 +gidnumber: 159 +homeDirectory: /home/uid159 + +dn: cn=user160,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user160 +sn: user160 +uid: uid160 +givenname: givenname160 +description: description160 +userPassword: password160 +mail: uid160 +uidnumber: 160 +gidnumber: 160 +homeDirectory: /home/uid160 + +dn: cn=user161,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user161 +sn: user161 +uid: uid161 +givenname: givenname161 +description: description161 +userPassword: password161 +mail: uid161 +uidnumber: 161 +gidnumber: 161 +homeDirectory: /home/uid161 + +dn: cn=user162,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user162 +sn: user162 +uid: uid162 +givenname: givenname162 +description: description162 +userPassword: password162 +mail: uid162 +uidnumber: 162 +gidnumber: 162 +homeDirectory: /home/uid162 + +dn: cn=user163,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user163 +sn: user163 +uid: uid163 +givenname: givenname163 +description: description163 +userPassword: password163 +mail: uid163 +uidnumber: 163 +gidnumber: 163 +homeDirectory: /home/uid163 + +dn: cn=user164,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user164 +sn: user164 +uid: uid164 +givenname: givenname164 +description: description164 +userPassword: password164 +mail: uid164 +uidnumber: 164 +gidnumber: 164 +homeDirectory: /home/uid164 + +dn: cn=user165,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user165 +sn: user165 +uid: uid165 +givenname: givenname165 +description: description165 +userPassword: password165 +mail: uid165 +uidnumber: 165 +gidnumber: 165 +homeDirectory: /home/uid165 + 
+dn: cn=user166,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user166 +sn: user166 +uid: uid166 +givenname: givenname166 +description: description166 +userPassword: password166 +mail: uid166 +uidnumber: 166 +gidnumber: 166 +homeDirectory: /home/uid166 + +dn: cn=user167,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user167 +sn: user167 +uid: uid167 +givenname: givenname167 +description: description167 +userPassword: password167 +mail: uid167 +uidnumber: 167 +gidnumber: 167 +homeDirectory: /home/uid167 + +dn: cn=user168,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user168 +sn: user168 +uid: uid168 +givenname: givenname168 +description: description168 +userPassword: password168 +mail: uid168 +uidnumber: 168 +gidnumber: 168 +homeDirectory: /home/uid168 + +dn: cn=user169,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user169 +sn: user169 +uid: uid169 +givenname: givenname169 +description: description169 +userPassword: password169 +mail: uid169 +uidnumber: 169 +gidnumber: 169 +homeDirectory: /home/uid169 + +dn: cn=user170,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user170 +sn: user170 +uid: uid170 +givenname: givenname170 +description: description170 +userPassword: password170 +mail: uid170 +uidnumber: 170 +gidnumber: 170 +homeDirectory: /home/uid170 + +dn: cn=user171,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user171 +sn: user171 +uid: uid171 +givenname: givenname171 +description: description171 +userPassword: password171 +mail: uid171 +uidnumber: 171 +gidnumber: 171 +homeDirectory: /home/uid171 + +dn: cn=user172,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user172 +sn: user172 +uid: uid172 +givenname: givenname172 +description: description172 +userPassword: password172 +mail: uid172 +uidnumber: 172 +gidnumber: 172 +homeDirectory: /home/uid172 + +dn: cn=user173,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user173 +sn: user173 +uid: uid173 +givenname: givenname173 +description: description173 +userPassword: password173 +mail: uid173 +uidnumber: 173 +gidnumber: 173 +homeDirectory: /home/uid173 + +dn: cn=user174,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user174 +sn: user174 +uid: uid174 +givenname: givenname174 +description: description174 +userPassword: password174 +mail: uid174 +uidnumber: 174 +gidnumber: 174 +homeDirectory: /home/uid174 + +dn: cn=user175,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user175 +sn: user175 +uid: uid175 +givenname: 
givenname175 +description: description175 +userPassword: password175 +mail: uid175 +uidnumber: 175 +gidnumber: 175 +homeDirectory: /home/uid175 + +dn: cn=user176,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user176 +sn: user176 +uid: uid176 +givenname: givenname176 +description: description176 +userPassword: password176 +mail: uid176 +uidnumber: 176 +gidnumber: 176 +homeDirectory: /home/uid176 + +dn: cn=user177,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user177 +sn: user177 +uid: uid177 +givenname: givenname177 +description: description177 +userPassword: password177 +mail: uid177 +uidnumber: 177 +gidnumber: 177 +homeDirectory: /home/uid177 + +dn: cn=user178,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user178 +sn: user178 +uid: uid178 +givenname: givenname178 +description: description178 +userPassword: password178 +mail: uid178 +uidnumber: 178 +gidnumber: 178 +homeDirectory: /home/uid178 + +dn: cn=user179,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user179 +sn: user179 +uid: uid179 +givenname: givenname179 +description: description179 +userPassword: password179 +mail: uid179 +uidnumber: 179 +gidnumber: 179 +homeDirectory: /home/uid179 + +dn: cn=user180,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user180 +sn: user180 +uid: uid180 +givenname: givenname180 +description: description180 +userPassword: password180 +mail: uid180 +uidnumber: 180 +gidnumber: 180 +homeDirectory: /home/uid180 + +dn: cn=user181,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user181 +sn: user181 +uid: uid181 +givenname: givenname181 +description: description181 +userPassword: password181 +mail: uid181 +uidnumber: 181 +gidnumber: 181 +homeDirectory: /home/uid181 + +dn: cn=user182,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user182 +sn: user182 +uid: uid182 +givenname: givenname182 +description: description182 +userPassword: password182 +mail: uid182 +uidnumber: 182 +gidnumber: 182 +homeDirectory: /home/uid182 + +dn: cn=user183,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user183 +sn: user183 +uid: uid183 +givenname: givenname183 +description: description183 +userPassword: password183 +mail: uid183 +uidnumber: 183 +gidnumber: 183 +homeDirectory: /home/uid183 + +dn: cn=user184,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user184 +sn: user184 +uid: uid184 +givenname: givenname184 +description: description184 +userPassword: password184 +mail: uid184 +uidnumber: 184 +gidnumber: 184 +homeDirectory: /home/uid184 + +dn: cn=user185,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user185 +sn: user185 +uid: uid185 +givenname: givenname185 +description: description185 +userPassword: password185 +mail: uid185 +uidnumber: 185 +gidnumber: 185 +homeDirectory: /home/uid185 + +dn: cn=user186,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user186 +sn: user186 +uid: uid186 +givenname: givenname186 +description: description186 +userPassword: password186 +mail: uid186 +uidnumber: 186 +gidnumber: 186 +homeDirectory: /home/uid186 + +dn: cn=user187,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user187 +sn: user187 +uid: uid187 +givenname: givenname187 +description: description187 +userPassword: password187 +mail: uid187 +uidnumber: 187 +gidnumber: 187 +homeDirectory: /home/uid187 + +dn: cn=user188,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user188 +sn: user188 +uid: uid188 +givenname: givenname188 +description: description188 +userPassword: password188 +mail: uid188 +uidnumber: 188 +gidnumber: 188 +homeDirectory: /home/uid188 + +dn: cn=user189,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user189 +sn: user189 +uid: uid189 +givenname: givenname189 +description: description189 +userPassword: password189 +mail: uid189 +uidnumber: 189 +gidnumber: 189 +homeDirectory: /home/uid189 + +dn: cn=user190,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user190 +sn: user190 +uid: uid190 +givenname: givenname190 +description: description190 +userPassword: password190 +mail: uid190 +uidnumber: 190 +gidnumber: 190 +homeDirectory: /home/uid190 + +dn: cn=user191,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user191 +sn: user191 +uid: uid191 +givenname: givenname191 +description: description191 +userPassword: password191 +mail: uid191 +uidnumber: 191 +gidnumber: 191 +homeDirectory: /home/uid191 + +dn: cn=user192,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user192 +sn: user192 +uid: uid192 +givenname: givenname192 +description: description192 +userPassword: password192 +mail: uid192 +uidnumber: 192 +gidnumber: 192 +homeDirectory: /home/uid192 + +dn: cn=user193,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user193 +sn: user193 +uid: uid193 +givenname: givenname193 +description: description193 +userPassword: password193 +mail: uid193 +uidnumber: 193 +gidnumber: 193 +homeDirectory: /home/uid193 + +dn: cn=user194,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user194 +sn: user194 +uid: uid194 +givenname: givenname194 +description: description194 +userPassword: password194 +mail: 
uid194 +uidnumber: 194 +gidnumber: 194 +homeDirectory: /home/uid194 + +dn: cn=user195,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user195 +sn: user195 +uid: uid195 +givenname: givenname195 +description: description195 +userPassword: password195 +mail: uid195 +uidnumber: 195 +gidnumber: 195 +homeDirectory: /home/uid195 + +dn: cn=user196,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user196 +sn: user196 +uid: uid196 +givenname: givenname196 +description: description196 +userPassword: password196 +mail: uid196 +uidnumber: 196 +gidnumber: 196 +homeDirectory: /home/uid196 + +dn: cn=user197,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user197 +sn: user197 +uid: uid197 +givenname: givenname197 +description: description197 +userPassword: password197 +mail: uid197 +uidnumber: 197 +gidnumber: 197 +homeDirectory: /home/uid197 + +dn: cn=user198,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user198 +sn: user198 +uid: uid198 +givenname: givenname198 +description: description198 +userPassword: password198 +mail: uid198 +uidnumber: 198 +gidnumber: 198 +homeDirectory: /home/uid198 + +dn: cn=user199,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user199 +sn: user199 +uid: uid199 +givenname: givenname199 +description: description199 +userPassword: password199 +mail: uid199 +uidnumber: 199 +gidnumber: 199 +homeDirectory: /home/uid199 + +dn: cn=user200,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user200 +sn: user200 +uid: uid200 +givenname: givenname200 +description: description200 +userPassword: password200 +mail: uid200 +uidnumber: 200 +gidnumber: 200 +homeDirectory: /home/uid200 + +dn: cn=user201,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user201 +sn: user201 +uid: uid201 +givenname: givenname201 +description: description201 +userPassword: password201 +mail: uid201 +uidnumber: 201 +gidnumber: 201 +homeDirectory: /home/uid201 + +dn: cn=user202,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user202 +sn: user202 +uid: uid202 +givenname: givenname202 +description: description202 +userPassword: password202 +mail: uid202 +uidnumber: 202 +gidnumber: 202 +homeDirectory: /home/uid202 + +dn: cn=user203,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user203 +sn: user203 +uid: uid203 +givenname: givenname203 +description: description203 +userPassword: password203 +mail: uid203 +uidnumber: 203 +gidnumber: 203 +homeDirectory: /home/uid203 + +dn: cn=user204,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user204 +sn: user204 +uid: uid204 +givenname: givenname204 +description: description204 +userPassword: password204 +mail: uid204 +uidnumber: 204 +gidnumber: 204 +homeDirectory: /home/uid204 + +dn: cn=user205,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user205 +sn: user205 +uid: uid205 +givenname: givenname205 +description: description205 +userPassword: password205 +mail: uid205 +uidnumber: 205 +gidnumber: 205 +homeDirectory: /home/uid205 + +dn: cn=user206,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user206 +sn: user206 +uid: uid206 +givenname: givenname206 +description: description206 +userPassword: password206 +mail: uid206 +uidnumber: 206 +gidnumber: 206 +homeDirectory: /home/uid206 + +dn: cn=user207,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user207 +sn: user207 +uid: uid207 +givenname: givenname207 +description: description207 +userPassword: password207 +mail: uid207 +uidnumber: 207 +gidnumber: 207 +homeDirectory: /home/uid207 + +dn: cn=user208,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user208 +sn: user208 +uid: uid208 +givenname: givenname208 +description: description208 +userPassword: password208 +mail: uid208 +uidnumber: 208 +gidnumber: 208 +homeDirectory: /home/uid208 + +dn: cn=user209,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user209 +sn: user209 +uid: uid209 +givenname: givenname209 +description: description209 +userPassword: password209 +mail: uid209 +uidnumber: 209 +gidnumber: 209 +homeDirectory: /home/uid209 + +dn: cn=user210,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user210 +sn: user210 +uid: uid210 +givenname: givenname210 +description: description210 +userPassword: password210 +mail: uid210 +uidnumber: 210 +gidnumber: 210 +homeDirectory: /home/uid210 + +dn: cn=user211,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user211 +sn: user211 +uid: uid211 +givenname: givenname211 +description: description211 +userPassword: password211 +mail: uid211 +uidnumber: 211 +gidnumber: 211 +homeDirectory: /home/uid211 + +dn: cn=user212,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user212 +sn: user212 +uid: uid212 +givenname: givenname212 +description: description212 +userPassword: password212 +mail: uid212 +uidnumber: 212 +gidnumber: 212 +homeDirectory: /home/uid212 + +dn: cn=user213,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user213 +sn: user213 +uid: uid213 +givenname: givenname213 +description: description213 +userPassword: password213 +mail: uid213 +uidnumber: 213 +gidnumber: 213 +homeDirectory: /home/uid213 + 
+dn: cn=user214,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user214 +sn: user214 +uid: uid214 +givenname: givenname214 +description: description214 +userPassword: password214 +mail: uid214 +uidnumber: 214 +gidnumber: 214 +homeDirectory: /home/uid214 + +dn: cn=user215,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user215 +sn: user215 +uid: uid215 +givenname: givenname215 +description: description215 +userPassword: password215 +mail: uid215 +uidnumber: 215 +gidnumber: 215 +homeDirectory: /home/uid215 + +dn: cn=user216,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user216 +sn: user216 +uid: uid216 +givenname: givenname216 +description: description216 +userPassword: password216 +mail: uid216 +uidnumber: 216 +gidnumber: 216 +homeDirectory: /home/uid216 + +dn: cn=user217,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user217 +sn: user217 +uid: uid217 +givenname: givenname217 +description: description217 +userPassword: password217 +mail: uid217 +uidnumber: 217 +gidnumber: 217 +homeDirectory: /home/uid217 + +dn: cn=user218,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user218 +sn: user218 +uid: uid218 +givenname: givenname218 +description: description218 +userPassword: password218 +mail: uid218 +uidnumber: 218 +gidnumber: 218 +homeDirectory: /home/uid218 + +dn: cn=user219,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user219 +sn: user219 +uid: uid219 +givenname: givenname219 +description: description219 +userPassword: password219 +mail: uid219 +uidnumber: 219 +gidnumber: 219 +homeDirectory: /home/uid219 + +dn: cn=user220,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user220 +sn: user220 +uid: uid220 +givenname: givenname220 +description: description220 +userPassword: password220 +mail: uid220 +uidnumber: 220 +gidnumber: 220 +homeDirectory: /home/uid220 + +dn: cn=user221,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user221 +sn: user221 +uid: uid221 +givenname: givenname221 +description: description221 +userPassword: password221 +mail: uid221 +uidnumber: 221 +gidnumber: 221 +homeDirectory: /home/uid221 + +dn: cn=user222,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user222 +sn: user222 +uid: uid222 +givenname: givenname222 +description: description222 +userPassword: password222 +mail: uid222 +uidnumber: 222 +gidnumber: 222 +homeDirectory: /home/uid222 + +dn: cn=user223,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user223 +sn: user223 +uid: uid223 +givenname: 
givenname223 +description: description223 +userPassword: password223 +mail: uid223 +uidnumber: 223 +gidnumber: 223 +homeDirectory: /home/uid223 + +dn: cn=user224,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user224 +sn: user224 +uid: uid224 +givenname: givenname224 +description: description224 +userPassword: password224 +mail: uid224 +uidnumber: 224 +gidnumber: 224 +homeDirectory: /home/uid224 + +dn: cn=user225,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user225 +sn: user225 +uid: uid225 +givenname: givenname225 +description: description225 +userPassword: password225 +mail: uid225 +uidnumber: 225 +gidnumber: 225 +homeDirectory: /home/uid225 + +dn: cn=user226,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user226 +sn: user226 +uid: uid226 +givenname: givenname226 +description: description226 +userPassword: password226 +mail: uid226 +uidnumber: 226 +gidnumber: 226 +homeDirectory: /home/uid226 + +dn: cn=user227,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user227 +sn: user227 +uid: uid227 +givenname: givenname227 +description: description227 +userPassword: password227 +mail: uid227 +uidnumber: 227 +gidnumber: 227 +homeDirectory: /home/uid227 + +dn: cn=user228,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user228 +sn: user228 +uid: uid228 +givenname: givenname228 +description: description228 +userPassword: password228 +mail: uid228 +uidnumber: 228 +gidnumber: 228 +homeDirectory: /home/uid228 + +dn: cn=user229,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user229 +sn: user229 +uid: uid229 +givenname: givenname229 +description: description229 +userPassword: password229 +mail: uid229 +uidnumber: 229 +gidnumber: 229 +homeDirectory: /home/uid229 + +dn: cn=user230,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user230 +sn: user230 +uid: uid230 +givenname: givenname230 +description: description230 +userPassword: password230 +mail: uid230 +uidnumber: 230 +gidnumber: 230 +homeDirectory: /home/uid230 + +dn: cn=user231,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user231 +sn: user231 +uid: uid231 +givenname: givenname231 +description: description231 +userPassword: password231 +mail: uid231 +uidnumber: 231 +gidnumber: 231 +homeDirectory: /home/uid231 + +dn: cn=user232,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user232 +sn: user232 +uid: uid232 +givenname: givenname232 +description: description232 +userPassword: password232 +mail: uid232 +uidnumber: 232 +gidnumber: 232 +homeDirectory: /home/uid232 + +dn: cn=user233,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user233 +sn: user233 +uid: uid233 +givenname: givenname233 +description: description233 +userPassword: password233 +mail: uid233 +uidnumber: 233 +gidnumber: 233 +homeDirectory: /home/uid233 + +dn: cn=user234,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user234 +sn: user234 +uid: uid234 +givenname: givenname234 +description: description234 +userPassword: password234 +mail: uid234 +uidnumber: 234 +gidnumber: 234 +homeDirectory: /home/uid234 + +dn: cn=user235,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user235 +sn: user235 +uid: uid235 +givenname: givenname235 +description: description235 +userPassword: password235 +mail: uid235 +uidnumber: 235 +gidnumber: 235 +homeDirectory: /home/uid235 + +dn: cn=user236,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user236 +sn: user236 +uid: uid236 +givenname: givenname236 +description: description236 +userPassword: password236 +mail: uid236 +uidnumber: 236 +gidnumber: 236 +homeDirectory: /home/uid236 + +dn: cn=user237,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user237 +sn: user237 +uid: uid237 +givenname: givenname237 +description: description237 +userPassword: password237 +mail: uid237 +uidnumber: 237 +gidnumber: 237 +homeDirectory: /home/uid237 + +dn: cn=user238,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user238 +sn: user238 +uid: uid238 +givenname: givenname238 +description: description238 +userPassword: password238 +mail: uid238 +uidnumber: 238 +gidnumber: 238 +homeDirectory: /home/uid238 + +dn: cn=user239,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user239 +sn: user239 +uid: uid239 +givenname: givenname239 +description: description239 +userPassword: password239 +mail: uid239 +uidnumber: 239 +gidnumber: 239 +homeDirectory: /home/uid239 + +dn: cn=user240,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user240 +sn: user240 +uid: uid240 +givenname: givenname240 +description: description240 +userPassword: password240 +mail: uid240 +uidnumber: 240 +gidnumber: 240 +homeDirectory: /home/uid240 + +dn: cn=user241,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user241 +sn: user241 +uid: uid241 +givenname: givenname241 +description: description241 +userPassword: password241 +mail: uid241 +uidnumber: 241 +gidnumber: 241 +homeDirectory: /home/uid241 + +dn: cn=user242,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user242 +sn: user242 +uid: uid242 +givenname: givenname242 +description: description242 +userPassword: password242 +mail: 
uid242 +uidnumber: 242 +gidnumber: 242 +homeDirectory: /home/uid242 + +dn: cn=user243,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user243 +sn: user243 +uid: uid243 +givenname: givenname243 +description: description243 +userPassword: password243 +mail: uid243 +uidnumber: 243 +gidnumber: 243 +homeDirectory: /home/uid243 + +dn: cn=user244,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user244 +sn: user244 +uid: uid244 +givenname: givenname244 +description: description244 +userPassword: password244 +mail: uid244 +uidnumber: 244 +gidnumber: 244 +homeDirectory: /home/uid244 + +dn: cn=user245,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user245 +sn: user245 +uid: uid245 +givenname: givenname245 +description: description245 +userPassword: password245 +mail: uid245 +uidnumber: 245 +gidnumber: 245 +homeDirectory: /home/uid245 + +dn: cn=user246,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user246 +sn: user246 +uid: uid246 +givenname: givenname246 +description: description246 +userPassword: password246 +mail: uid246 +uidnumber: 246 +gidnumber: 246 +homeDirectory: /home/uid246 + +dn: cn=user247,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user247 +sn: user247 +uid: uid247 +givenname: givenname247 +description: description247 +userPassword: password247 +mail: uid247 +uidnumber: 247 +gidnumber: 247 +homeDirectory: /home/uid247 + +dn: cn=user248,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user248 +sn: user248 +uid: uid248 +givenname: givenname248 +description: description248 +userPassword: password248 +mail: uid248 +uidnumber: 248 +gidnumber: 248 +homeDirectory: /home/uid248 + +dn: cn=user249,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user249 +sn: user249 +uid: uid249 +givenname: givenname249 +description: description249 +userPassword: password249 +mail: uid249 +uidnumber: 249 +gidnumber: 249 +homeDirectory: /home/uid249 + +dn: cn=user250,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user250 +sn: user250 +uid: uid250 +givenname: givenname250 +description: description250 +userPassword: password250 +mail: uid250 +uidnumber: 250 +gidnumber: 250 +homeDirectory: /home/uid250 + +dn: cn=user251,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user251 +sn: user251 +uid: uid251 +givenname: givenname251 +description: description251 +userPassword: password251 +mail: uid251 +uidnumber: 251 +gidnumber: 251 +homeDirectory: /home/uid251 + +dn: cn=user252,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user252 +sn: user252 +uid: uid252 +givenname: givenname252 +description: description252 +userPassword: password252 +mail: uid252 +uidnumber: 252 +gidnumber: 252 +homeDirectory: /home/uid252 + +dn: cn=user253,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user253 +sn: user253 +uid: uid253 +givenname: givenname253 +description: description253 +userPassword: password253 +mail: uid253 +uidnumber: 253 +gidnumber: 253 +homeDirectory: /home/uid253 + +dn: cn=user254,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user254 +sn: user254 +uid: uid254 +givenname: givenname254 +description: description254 +userPassword: password254 +mail: uid254 +uidnumber: 254 +gidnumber: 254 +homeDirectory: /home/uid254 + +dn: cn=user255,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user255 +sn: user255 +uid: uid255 +givenname: givenname255 +description: description255 +userPassword: password255 +mail: uid255 +uidnumber: 255 +gidnumber: 255 +homeDirectory: /home/uid255 + +dn: cn=user256,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user256 +sn: user256 +uid: uid256 +givenname: givenname256 +description: description256 +userPassword: password256 +mail: uid256 +uidnumber: 256 +gidnumber: 256 +homeDirectory: /home/uid256 + +dn: cn=user257,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user257 +sn: user257 +uid: uid257 +givenname: givenname257 +description: description257 +userPassword: password257 +mail: uid257 +uidnumber: 257 +gidnumber: 257 +homeDirectory: /home/uid257 + +dn: cn=user258,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user258 +sn: user258 +uid: uid258 +givenname: givenname258 +description: description258 +userPassword: password258 +mail: uid258 +uidnumber: 258 +gidnumber: 258 +homeDirectory: /home/uid258 + +dn: cn=user259,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user259 +sn: user259 +uid: uid259 +givenname: givenname259 +description: description259 +userPassword: password259 +mail: uid259 +uidnumber: 259 +gidnumber: 259 +homeDirectory: /home/uid259 + +dn: cn=user260,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user260 +sn: user260 +uid: uid260 +givenname: givenname260 +description: description260 +userPassword: password260 +mail: uid260 +uidnumber: 260 +gidnumber: 260 +homeDirectory: /home/uid260 + +dn: cn=user261,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user261 +sn: user261 +uid: uid261 +givenname: givenname261 +description: description261 +userPassword: password261 +mail: uid261 +uidnumber: 261 +gidnumber: 261 +homeDirectory: /home/uid261 + 
+dn: cn=user262,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user262 +sn: user262 +uid: uid262 +givenname: givenname262 +description: description262 +userPassword: password262 +mail: uid262 +uidnumber: 262 +gidnumber: 262 +homeDirectory: /home/uid262 + +dn: cn=user263,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user263 +sn: user263 +uid: uid263 +givenname: givenname263 +description: description263 +userPassword: password263 +mail: uid263 +uidnumber: 263 +gidnumber: 263 +homeDirectory: /home/uid263 + +dn: cn=user264,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user264 +sn: user264 +uid: uid264 +givenname: givenname264 +description: description264 +userPassword: password264 +mail: uid264 +uidnumber: 264 +gidnumber: 264 +homeDirectory: /home/uid264 + +dn: cn=user265,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user265 +sn: user265 +uid: uid265 +givenname: givenname265 +description: description265 +userPassword: password265 +mail: uid265 +uidnumber: 265 +gidnumber: 265 +homeDirectory: /home/uid265 + +dn: cn=user266,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user266 +sn: user266 +uid: uid266 +givenname: givenname266 +description: description266 +userPassword: password266 +mail: uid266 +uidnumber: 266 +gidnumber: 266 +homeDirectory: /home/uid266 + +dn: cn=user267,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user267 +sn: user267 +uid: uid267 +givenname: givenname267 +description: description267 +userPassword: password267 +mail: uid267 +uidnumber: 267 +gidnumber: 267 +homeDirectory: /home/uid267 + +dn: cn=user268,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user268 +sn: user268 +uid: uid268 +givenname: givenname268 +description: description268 +userPassword: password268 +mail: uid268 +uidnumber: 268 +gidnumber: 268 +homeDirectory: /home/uid268 + +dn: cn=user269,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user269 +sn: user269 +uid: uid269 +givenname: givenname269 +description: description269 +userPassword: password269 +mail: uid269 +uidnumber: 269 +gidnumber: 269 +homeDirectory: /home/uid269 + +dn: cn=user270,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user270 +sn: user270 +uid: uid270 +givenname: givenname270 +description: description270 +userPassword: password270 +mail: uid270 +uidnumber: 270 +gidnumber: 270 +homeDirectory: /home/uid270 + +dn: cn=user271,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user271 +sn: user271 +uid: uid271 +givenname: 
givenname271 +description: description271 +userPassword: password271 +mail: uid271 +uidnumber: 271 +gidnumber: 271 +homeDirectory: /home/uid271 + +dn: cn=user272,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user272 +sn: user272 +uid: uid272 +givenname: givenname272 +description: description272 +userPassword: password272 +mail: uid272 +uidnumber: 272 +gidnumber: 272 +homeDirectory: /home/uid272 + +dn: cn=user273,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user273 +sn: user273 +uid: uid273 +givenname: givenname273 +description: description273 +userPassword: password273 +mail: uid273 +uidnumber: 273 +gidnumber: 273 +homeDirectory: /home/uid273 + +dn: cn=user274,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user274 +sn: user274 +uid: uid274 +givenname: givenname274 +description: description274 +userPassword: password274 +mail: uid274 +uidnumber: 274 +gidnumber: 274 +homeDirectory: /home/uid274 + +dn: cn=user275,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user275 +sn: user275 +uid: uid275 +givenname: givenname275 +description: description275 +userPassword: password275 +mail: uid275 +uidnumber: 275 +gidnumber: 275 +homeDirectory: /home/uid275 + +dn: cn=user276,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user276 +sn: user276 +uid: uid276 +givenname: givenname276 +description: description276 +userPassword: password276 +mail: uid276 +uidnumber: 276 +gidnumber: 276 +homeDirectory: /home/uid276 + +dn: cn=user277,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user277 +sn: user277 +uid: uid277 +givenname: givenname277 +description: description277 +userPassword: password277 +mail: uid277 +uidnumber: 277 +gidnumber: 277 +homeDirectory: /home/uid277 + +dn: cn=user278,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user278 +sn: user278 +uid: uid278 +givenname: givenname278 +description: description278 +userPassword: password278 +mail: uid278 +uidnumber: 278 +gidnumber: 278 +homeDirectory: /home/uid278 + +dn: cn=user279,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user279 +sn: user279 +uid: uid279 +givenname: givenname279 +description: description279 +userPassword: password279 +mail: uid279 +uidnumber: 279 +gidnumber: 279 +homeDirectory: /home/uid279 + +dn: cn=user280,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user280 +sn: user280 +uid: uid280 +givenname: givenname280 +description: description280 +userPassword: password280 +mail: uid280 +uidnumber: 280 +gidnumber: 280 +homeDirectory: /home/uid280 + +dn: cn=user281,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user281 +sn: user281 +uid: uid281 +givenname: givenname281 +description: description281 +userPassword: password281 +mail: uid281 +uidnumber: 281 +gidnumber: 281 +homeDirectory: /home/uid281 + +dn: cn=user282,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user282 +sn: user282 +uid: uid282 +givenname: givenname282 +description: description282 +userPassword: password282 +mail: uid282 +uidnumber: 282 +gidnumber: 282 +homeDirectory: /home/uid282 + +dn: cn=user283,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user283 +sn: user283 +uid: uid283 +givenname: givenname283 +description: description283 +userPassword: password283 +mail: uid283 +uidnumber: 283 +gidnumber: 283 +homeDirectory: /home/uid283 + +dn: cn=user284,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user284 +sn: user284 +uid: uid284 +givenname: givenname284 +description: description284 +userPassword: password284 +mail: uid284 +uidnumber: 284 +gidnumber: 284 +homeDirectory: /home/uid284 + +dn: cn=user285,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user285 +sn: user285 +uid: uid285 +givenname: givenname285 +description: description285 +userPassword: password285 +mail: uid285 +uidnumber: 285 +gidnumber: 285 +homeDirectory: /home/uid285 + +dn: cn=user286,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user286 +sn: user286 +uid: uid286 +givenname: givenname286 +description: description286 +userPassword: password286 +mail: uid286 +uidnumber: 286 +gidnumber: 286 +homeDirectory: /home/uid286 + +dn: cn=user287,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user287 +sn: user287 +uid: uid287 +givenname: givenname287 +description: description287 +userPassword: password287 +mail: uid287 +uidnumber: 287 +gidnumber: 287 +homeDirectory: /home/uid287 + +dn: cn=user288,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user288 +sn: user288 +uid: uid288 +givenname: givenname288 +description: description288 +userPassword: password288 +mail: uid288 +uidnumber: 288 +gidnumber: 288 +homeDirectory: /home/uid288 + +dn: cn=user289,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user289 +sn: user289 +uid: uid289 +givenname: givenname289 +description: description289 +userPassword: password289 +mail: uid289 +uidnumber: 289 +gidnumber: 289 +homeDirectory: /home/uid289 + +dn: cn=user290,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user290 +sn: user290 +uid: uid290 +givenname: givenname290 +description: description290 +userPassword: password290 +mail: 
uid290 +uidnumber: 290 +gidnumber: 290 +homeDirectory: /home/uid290 + +dn: cn=user291,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user291 +sn: user291 +uid: uid291 +givenname: givenname291 +description: description291 +userPassword: password291 +mail: uid291 +uidnumber: 291 +gidnumber: 291 +homeDirectory: /home/uid291 + +dn: cn=user292,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user292 +sn: user292 +uid: uid292 +givenname: givenname292 +description: description292 +userPassword: password292 +mail: uid292 +uidnumber: 292 +gidnumber: 292 +homeDirectory: /home/uid292 + +dn: cn=user293,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user293 +sn: user293 +uid: uid293 +givenname: givenname293 +description: description293 +userPassword: password293 +mail: uid293 +uidnumber: 293 +gidnumber: 293 +homeDirectory: /home/uid293 + +dn: cn=user294,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user294 +sn: user294 +uid: uid294 +givenname: givenname294 +description: description294 +userPassword: password294 +mail: uid294 +uidnumber: 294 +gidnumber: 294 +homeDirectory: /home/uid294 + +dn: cn=user295,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user295 +sn: user295 +uid: uid295 +givenname: givenname295 +description: description295 +userPassword: password295 +mail: uid295 +uidnumber: 295 +gidnumber: 295 +homeDirectory: /home/uid295 + +dn: cn=user296,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user296 +sn: user296 +uid: uid296 +givenname: givenname296 +description: description296 +userPassword: password296 +mail: uid296 +uidnumber: 296 +gidnumber: 296 +homeDirectory: /home/uid296 + +dn: cn=user297,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user297 +sn: user297 +uid: uid297 +givenname: givenname297 +description: description297 +userPassword: password297 +mail: uid297 +uidnumber: 297 +gidnumber: 297 +homeDirectory: /home/uid297 + +dn: cn=user298,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user298 +sn: user298 +uid: uid298 +givenname: givenname298 +description: description298 +userPassword: password298 +mail: uid298 +uidnumber: 298 +gidnumber: 298 +homeDirectory: /home/uid298 + +dn: cn=user299,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user299 +sn: user299 +uid: uid299 +givenname: givenname299 +description: description299 +userPassword: password299 +mail: uid299 +uidnumber: 299 +gidnumber: 299 +homeDirectory: /home/uid299 + +dn: cn=user300,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user300 +sn: user300 +uid: uid300 +givenname: givenname300 +description: description300 +userPassword: password300 +mail: uid300 +uidnumber: 300 +gidnumber: 300 +homeDirectory: /home/uid300 + +dn: cn=user301,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user301 +sn: user301 +uid: uid301 +givenname: givenname301 +description: description301 +userPassword: password301 +mail: uid301 +uidnumber: 301 +gidnumber: 301 +homeDirectory: /home/uid301 + +dn: cn=user302,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user302 +sn: user302 +uid: uid302 +givenname: givenname302 +description: description302 +userPassword: password302 +mail: uid302 +uidnumber: 302 +gidnumber: 302 +homeDirectory: /home/uid302 + +dn: cn=user303,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user303 +sn: user303 +uid: uid303 +givenname: givenname303 +description: description303 +userPassword: password303 +mail: uid303 +uidnumber: 303 +gidnumber: 303 +homeDirectory: /home/uid303 + +dn: cn=user304,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user304 +sn: user304 +uid: uid304 +givenname: givenname304 +description: description304 +userPassword: password304 +mail: uid304 +uidnumber: 304 +gidnumber: 304 +homeDirectory: /home/uid304 + +dn: cn=user305,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user305 +sn: user305 +uid: uid305 +givenname: givenname305 +description: description305 +userPassword: password305 +mail: uid305 +uidnumber: 305 +gidnumber: 305 +homeDirectory: /home/uid305 + +dn: cn=user306,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user306 +sn: user306 +uid: uid306 +givenname: givenname306 +description: description306 +userPassword: password306 +mail: uid306 +uidnumber: 306 +gidnumber: 306 +homeDirectory: /home/uid306 + +dn: cn=user307,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user307 +sn: user307 +uid: uid307 +givenname: givenname307 +description: description307 +userPassword: password307 +mail: uid307 +uidnumber: 307 +gidnumber: 307 +homeDirectory: /home/uid307 + +dn: cn=user308,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user308 +sn: user308 +uid: uid308 +givenname: givenname308 +description: description308 +userPassword: password308 +mail: uid308 +uidnumber: 308 +gidnumber: 308 +homeDirectory: /home/uid308 + +dn: cn=user309,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user309 +sn: user309 +uid: uid309 +givenname: givenname309 +description: description309 +userPassword: password309 +mail: uid309 +uidnumber: 309 +gidnumber: 309 +homeDirectory: /home/uid309 + 
+dn: cn=user310,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user310 +sn: user310 +uid: uid310 +givenname: givenname310 +description: description310 +userPassword: password310 +mail: uid310 +uidnumber: 310 +gidnumber: 310 +homeDirectory: /home/uid310 + +dn: cn=user311,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user311 +sn: user311 +uid: uid311 +givenname: givenname311 +description: description311 +userPassword: password311 +mail: uid311 +uidnumber: 311 +gidnumber: 311 +homeDirectory: /home/uid311 + +dn: cn=user312,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user312 +sn: user312 +uid: uid312 +givenname: givenname312 +description: description312 +userPassword: password312 +mail: uid312 +uidnumber: 312 +gidnumber: 312 +homeDirectory: /home/uid312 + +dn: cn=user313,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user313 +sn: user313 +uid: uid313 +givenname: givenname313 +description: description313 +userPassword: password313 +mail: uid313 +uidnumber: 313 +gidnumber: 313 +homeDirectory: /home/uid313 + +dn: cn=user314,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user314 +sn: user314 +uid: uid314 +givenname: givenname314 +description: description314 +userPassword: password314 +mail: uid314 +uidnumber: 314 +gidnumber: 314 +homeDirectory: /home/uid314 + +dn: cn=user315,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user315 +sn: user315 +uid: uid315 +givenname: givenname315 +description: description315 +userPassword: password315 +mail: uid315 +uidnumber: 315 +gidnumber: 315 +homeDirectory: /home/uid315 + +dn: cn=user316,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user316 +sn: user316 +uid: uid316 +givenname: givenname316 +description: description316 +userPassword: password316 +mail: uid316 +uidnumber: 316 +gidnumber: 316 +homeDirectory: /home/uid316 + +dn: cn=user317,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user317 +sn: user317 +uid: uid317 +givenname: givenname317 +description: description317 +userPassword: password317 +mail: uid317 +uidnumber: 317 +gidnumber: 317 +homeDirectory: /home/uid317 + +dn: cn=user318,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user318 +sn: user318 +uid: uid318 +givenname: givenname318 +description: description318 +userPassword: password318 +mail: uid318 +uidnumber: 318 +gidnumber: 318 +homeDirectory: /home/uid318 + +dn: cn=user319,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user319 +sn: user319 +uid: uid319 +givenname: 
givenname319 +description: description319 +userPassword: password319 +mail: uid319 +uidnumber: 319 +gidnumber: 319 +homeDirectory: /home/uid319 + +dn: cn=user320,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user320 +sn: user320 +uid: uid320 +givenname: givenname320 +description: description320 +userPassword: password320 +mail: uid320 +uidnumber: 320 +gidnumber: 320 +homeDirectory: /home/uid320 + +dn: cn=user321,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user321 +sn: user321 +uid: uid321 +givenname: givenname321 +description: description321 +userPassword: password321 +mail: uid321 +uidnumber: 321 +gidnumber: 321 +homeDirectory: /home/uid321 + +dn: cn=user322,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user322 +sn: user322 +uid: uid322 +givenname: givenname322 +description: description322 +userPassword: password322 +mail: uid322 +uidnumber: 322 +gidnumber: 322 +homeDirectory: /home/uid322 + +dn: cn=user323,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user323 +sn: user323 +uid: uid323 +givenname: givenname323 +description: description323 +userPassword: password323 +mail: uid323 +uidnumber: 323 +gidnumber: 323 +homeDirectory: /home/uid323 + +dn: cn=user324,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user324 +sn: user324 +uid: uid324 +givenname: givenname324 +description: description324 +userPassword: password324 +mail: uid324 +uidnumber: 324 +gidnumber: 324 +homeDirectory: /home/uid324 + +dn: cn=user325,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user325 +sn: user325 +uid: uid325 +givenname: givenname325 +description: description325 +userPassword: password325 +mail: uid325 +uidnumber: 325 +gidnumber: 325 +homeDirectory: /home/uid325 + +dn: cn=user326,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user326 +sn: user326 +uid: uid326 +givenname: givenname326 +description: description326 +userPassword: password326 +mail: uid326 +uidnumber: 326 +gidnumber: 326 +homeDirectory: /home/uid326 + +dn: cn=user327,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user327 +sn: user327 +uid: uid327 +givenname: givenname327 +description: description327 +userPassword: password327 +mail: uid327 +uidnumber: 327 +gidnumber: 327 +homeDirectory: /home/uid327 + +dn: cn=user328,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user328 +sn: user328 +uid: uid328 +givenname: givenname328 +description: description328 +userPassword: password328 +mail: uid328 +uidnumber: 328 +gidnumber: 328 +homeDirectory: /home/uid328 + +dn: cn=user329,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user329 +sn: user329 +uid: uid329 +givenname: givenname329 +description: description329 +userPassword: password329 +mail: uid329 +uidnumber: 329 +gidnumber: 329 +homeDirectory: /home/uid329 + +dn: cn=user330,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user330 +sn: user330 +uid: uid330 +givenname: givenname330 +description: description330 +userPassword: password330 +mail: uid330 +uidnumber: 330 +gidnumber: 330 +homeDirectory: /home/uid330 + +dn: cn=user331,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user331 +sn: user331 +uid: uid331 +givenname: givenname331 +description: description331 +userPassword: password331 +mail: uid331 +uidnumber: 331 +gidnumber: 331 +homeDirectory: /home/uid331 + +dn: cn=user332,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user332 +sn: user332 +uid: uid332 +givenname: givenname332 +description: description332 +userPassword: password332 +mail: uid332 +uidnumber: 332 +gidnumber: 332 +homeDirectory: /home/uid332 + +dn: cn=user333,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user333 +sn: user333 +uid: uid333 +givenname: givenname333 +description: description333 +userPassword: password333 +mail: uid333 +uidnumber: 333 +gidnumber: 333 +homeDirectory: /home/uid333 + +dn: cn=user334,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user334 +sn: user334 +uid: uid334 +givenname: givenname334 +description: description334 +userPassword: password334 +mail: uid334 +uidnumber: 334 +gidnumber: 334 +homeDirectory: /home/uid334 + +dn: cn=user335,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user335 +sn: user335 +uid: uid335 +givenname: givenname335 +description: description335 +userPassword: password335 +mail: uid335 +uidnumber: 335 +gidnumber: 335 +homeDirectory: /home/uid335 + +dn: cn=user336,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user336 +sn: user336 +uid: uid336 +givenname: givenname336 +description: description336 +userPassword: password336 +mail: uid336 +uidnumber: 336 +gidnumber: 336 +homeDirectory: /home/uid336 + +dn: cn=user337,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user337 +sn: user337 +uid: uid337 +givenname: givenname337 +description: description337 +userPassword: password337 +mail: uid337 +uidnumber: 337 +gidnumber: 337 +homeDirectory: /home/uid337 + +dn: cn=user338,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user338 +sn: user338 +uid: uid338 +givenname: givenname338 +description: description338 +userPassword: password338 +mail: 
uid338 +uidnumber: 338 +gidnumber: 338 +homeDirectory: /home/uid338 + +dn: cn=user339,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user339 +sn: user339 +uid: uid339 +givenname: givenname339 +description: description339 +userPassword: password339 +mail: uid339 +uidnumber: 339 +gidnumber: 339 +homeDirectory: /home/uid339 + +dn: cn=user340,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user340 +sn: user340 +uid: uid340 +givenname: givenname340 +description: description340 +userPassword: password340 +mail: uid340 +uidnumber: 340 +gidnumber: 340 +homeDirectory: /home/uid340 + +dn: cn=user341,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user341 +sn: user341 +uid: uid341 +givenname: givenname341 +description: description341 +userPassword: password341 +mail: uid341 +uidnumber: 341 +gidnumber: 341 +homeDirectory: /home/uid341 + +dn: cn=user342,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user342 +sn: user342 +uid: uid342 +givenname: givenname342 +description: description342 +userPassword: password342 +mail: uid342 +uidnumber: 342 +gidnumber: 342 +homeDirectory: /home/uid342 + +dn: cn=user343,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user343 +sn: user343 +uid: uid343 +givenname: givenname343 +description: description343 +userPassword: password343 +mail: uid343 +uidnumber: 343 +gidnumber: 343 +homeDirectory: /home/uid343 + +dn: cn=user344,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user344 +sn: user344 +uid: uid344 +givenname: givenname344 +description: description344 +userPassword: password344 +mail: uid344 +uidnumber: 344 +gidnumber: 344 +homeDirectory: /home/uid344 + +dn: cn=user345,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user345 +sn: user345 +uid: uid345 +givenname: givenname345 +description: description345 +userPassword: password345 +mail: uid345 +uidnumber: 345 +gidnumber: 345 +homeDirectory: /home/uid345 + +dn: cn=user346,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user346 +sn: user346 +uid: uid346 +givenname: givenname346 +description: description346 +userPassword: password346 +mail: uid346 +uidnumber: 346 +gidnumber: 346 +homeDirectory: /home/uid346 + +dn: cn=user347,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user347 +sn: user347 +uid: uid347 +givenname: givenname347 +description: description347 +userPassword: password347 +mail: uid347 +uidnumber: 347 +gidnumber: 347 +homeDirectory: /home/uid347 + +dn: cn=user348,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user348 +sn: user348 +uid: uid348 +givenname: givenname348 +description: description348 +userPassword: password348 +mail: uid348 +uidnumber: 348 +gidnumber: 348 +homeDirectory: /home/uid348 + +dn: cn=user349,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user349 +sn: user349 +uid: uid349 +givenname: givenname349 +description: description349 +userPassword: password349 +mail: uid349 +uidnumber: 349 +gidnumber: 349 +homeDirectory: /home/uid349 + +dn: cn=user350,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user350 +sn: user350 +uid: uid350 +givenname: givenname350 +description: description350 +userPassword: password350 +mail: uid350 +uidnumber: 350 +gidnumber: 350 +homeDirectory: /home/uid350 + +dn: cn=user351,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user351 +sn: user351 +uid: uid351 +givenname: givenname351 +description: description351 +userPassword: password351 +mail: uid351 +uidnumber: 351 +gidnumber: 351 +homeDirectory: /home/uid351 + +dn: cn=user352,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user352 +sn: user352 +uid: uid352 +givenname: givenname352 +description: description352 +userPassword: password352 +mail: uid352 +uidnumber: 352 +gidnumber: 352 +homeDirectory: /home/uid352 + +dn: cn=user353,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user353 +sn: user353 +uid: uid353 +givenname: givenname353 +description: description353 +userPassword: password353 +mail: uid353 +uidnumber: 353 +gidnumber: 353 +homeDirectory: /home/uid353 + +dn: cn=user354,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user354 +sn: user354 +uid: uid354 +givenname: givenname354 +description: description354 +userPassword: password354 +mail: uid354 +uidnumber: 354 +gidnumber: 354 +homeDirectory: /home/uid354 + +dn: cn=user355,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user355 +sn: user355 +uid: uid355 +givenname: givenname355 +description: description355 +userPassword: password355 +mail: uid355 +uidnumber: 355 +gidnumber: 355 +homeDirectory: /home/uid355 + +dn: cn=user356,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user356 +sn: user356 +uid: uid356 +givenname: givenname356 +description: description356 +userPassword: password356 +mail: uid356 +uidnumber: 356 +gidnumber: 356 +homeDirectory: /home/uid356 + +dn: cn=user357,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user357 +sn: user357 +uid: uid357 +givenname: givenname357 +description: description357 +userPassword: password357 +mail: uid357 +uidnumber: 357 +gidnumber: 357 +homeDirectory: /home/uid357 + 
+dn: cn=user358,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user358 +sn: user358 +uid: uid358 +givenname: givenname358 +description: description358 +userPassword: password358 +mail: uid358 +uidnumber: 358 +gidnumber: 358 +homeDirectory: /home/uid358 + +dn: cn=user359,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user359 +sn: user359 +uid: uid359 +givenname: givenname359 +description: description359 +userPassword: password359 +mail: uid359 +uidnumber: 359 +gidnumber: 359 +homeDirectory: /home/uid359 + +dn: cn=user360,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user360 +sn: user360 +uid: uid360 +givenname: givenname360 +description: description360 +userPassword: password360 +mail: uid360 +uidnumber: 360 +gidnumber: 360 +homeDirectory: /home/uid360 + +dn: cn=user361,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user361 +sn: user361 +uid: uid361 +givenname: givenname361 +description: description361 +userPassword: password361 +mail: uid361 +uidnumber: 361 +gidnumber: 361 +homeDirectory: /home/uid361 + +dn: cn=user362,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user362 +sn: user362 +uid: uid362 +givenname: givenname362 +description: description362 +userPassword: password362 +mail: uid362 +uidnumber: 362 +gidnumber: 362 +homeDirectory: /home/uid362 + +dn: cn=user363,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user363 +sn: user363 +uid: uid363 +givenname: givenname363 +description: description363 +userPassword: password363 +mail: uid363 +uidnumber: 363 +gidnumber: 363 +homeDirectory: /home/uid363 + +dn: cn=user364,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user364 +sn: user364 +uid: uid364 +givenname: givenname364 +description: description364 +userPassword: password364 +mail: uid364 +uidnumber: 364 +gidnumber: 364 +homeDirectory: /home/uid364 + +dn: cn=user365,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user365 +sn: user365 +uid: uid365 +givenname: givenname365 +description: description365 +userPassword: password365 +mail: uid365 +uidnumber: 365 +gidnumber: 365 +homeDirectory: /home/uid365 + +dn: cn=user366,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user366 +sn: user366 +uid: uid366 +givenname: givenname366 +description: description366 +userPassword: password366 +mail: uid366 +uidnumber: 366 +gidnumber: 366 +homeDirectory: /home/uid366 + +dn: cn=user367,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user367 +sn: user367 +uid: uid367 +givenname: 
givenname367 +description: description367 +userPassword: password367 +mail: uid367 +uidnumber: 367 +gidnumber: 367 +homeDirectory: /home/uid367 + +dn: cn=user368,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user368 +sn: user368 +uid: uid368 +givenname: givenname368 +description: description368 +userPassword: password368 +mail: uid368 +uidnumber: 368 +gidnumber: 368 +homeDirectory: /home/uid368 + +dn: cn=user369,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user369 +sn: user369 +uid: uid369 +givenname: givenname369 +description: description369 +userPassword: password369 +mail: uid369 +uidnumber: 369 +gidnumber: 369 +homeDirectory: /home/uid369 + +dn: cn=user370,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user370 +sn: user370 +uid: uid370 +givenname: givenname370 +description: description370 +userPassword: password370 +mail: uid370 +uidnumber: 370 +gidnumber: 370 +homeDirectory: /home/uid370 + +dn: cn=user371,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user371 +sn: user371 +uid: uid371 +givenname: givenname371 +description: description371 +userPassword: password371 +mail: uid371 +uidnumber: 371 +gidnumber: 371 +homeDirectory: /home/uid371 + +dn: cn=user372,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user372 +sn: user372 +uid: uid372 +givenname: givenname372 +description: description372 +userPassword: password372 +mail: uid372 +uidnumber: 372 +gidnumber: 372 +homeDirectory: /home/uid372 + +dn: cn=user373,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user373 +sn: user373 +uid: uid373 +givenname: givenname373 +description: description373 +userPassword: password373 +mail: uid373 +uidnumber: 373 +gidnumber: 373 +homeDirectory: /home/uid373 + +dn: cn=user374,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user374 +sn: user374 +uid: uid374 +givenname: givenname374 +description: description374 +userPassword: password374 +mail: uid374 +uidnumber: 374 +gidnumber: 374 +homeDirectory: /home/uid374 + +dn: cn=user375,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user375 +sn: user375 +uid: uid375 +givenname: givenname375 +description: description375 +userPassword: password375 +mail: uid375 +uidnumber: 375 +gidnumber: 375 +homeDirectory: /home/uid375 + +dn: cn=user376,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user376 +sn: user376 +uid: uid376 +givenname: givenname376 +description: description376 +userPassword: password376 +mail: uid376 +uidnumber: 376 +gidnumber: 376 +homeDirectory: /home/uid376 + +dn: cn=user377,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user377 +sn: user377 +uid: uid377 +givenname: givenname377 +description: description377 +userPassword: password377 +mail: uid377 +uidnumber: 377 +gidnumber: 377 +homeDirectory: /home/uid377 + +dn: cn=user378,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user378 +sn: user378 +uid: uid378 +givenname: givenname378 +description: description378 +userPassword: password378 +mail: uid378 +uidnumber: 378 +gidnumber: 378 +homeDirectory: /home/uid378 + +dn: cn=user379,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user379 +sn: user379 +uid: uid379 +givenname: givenname379 +description: description379 +userPassword: password379 +mail: uid379 +uidnumber: 379 +gidnumber: 379 +homeDirectory: /home/uid379 + +dn: cn=user380,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user380 +sn: user380 +uid: uid380 +givenname: givenname380 +description: description380 +userPassword: password380 +mail: uid380 +uidnumber: 380 +gidnumber: 380 +homeDirectory: /home/uid380 + +dn: cn=user381,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user381 +sn: user381 +uid: uid381 +givenname: givenname381 +description: description381 +userPassword: password381 +mail: uid381 +uidnumber: 381 +gidnumber: 381 +homeDirectory: /home/uid381 + +dn: cn=user382,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user382 +sn: user382 +uid: uid382 +givenname: givenname382 +description: description382 +userPassword: password382 +mail: uid382 +uidnumber: 382 +gidnumber: 382 +homeDirectory: /home/uid382 + +dn: cn=user383,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user383 +sn: user383 +uid: uid383 +givenname: givenname383 +description: description383 +userPassword: password383 +mail: uid383 +uidnumber: 383 +gidnumber: 383 +homeDirectory: /home/uid383 + +dn: cn=user384,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user384 +sn: user384 +uid: uid384 +givenname: givenname384 +description: description384 +userPassword: password384 +mail: uid384 +uidnumber: 384 +gidnumber: 384 +homeDirectory: /home/uid384 + +dn: cn=user385,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user385 +sn: user385 +uid: uid385 +givenname: givenname385 +description: description385 +userPassword: password385 +mail: uid385 +uidnumber: 385 +gidnumber: 385 +homeDirectory: /home/uid385 + +dn: cn=user386,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user386 +sn: user386 +uid: uid386 +givenname: givenname386 +description: description386 +userPassword: password386 +mail: 
uid386 +uidnumber: 386 +gidnumber: 386 +homeDirectory: /home/uid386 + +dn: cn=user387,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user387 +sn: user387 +uid: uid387 +givenname: givenname387 +description: description387 +userPassword: password387 +mail: uid387 +uidnumber: 387 +gidnumber: 387 +homeDirectory: /home/uid387 + +dn: cn=user388,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user388 +sn: user388 +uid: uid388 +givenname: givenname388 +description: description388 +userPassword: password388 +mail: uid388 +uidnumber: 388 +gidnumber: 388 +homeDirectory: /home/uid388 + +dn: cn=user389,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user389 +sn: user389 +uid: uid389 +givenname: givenname389 +description: description389 +userPassword: password389 +mail: uid389 +uidnumber: 389 +gidnumber: 389 +homeDirectory: /home/uid389 + +dn: cn=user390,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user390 +sn: user390 +uid: uid390 +givenname: givenname390 +description: description390 +userPassword: password390 +mail: uid390 +uidnumber: 390 +gidnumber: 390 +homeDirectory: /home/uid390 + +dn: cn=user391,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user391 +sn: user391 +uid: uid391 +givenname: givenname391 +description: description391 +userPassword: password391 +mail: uid391 +uidnumber: 391 +gidnumber: 391 +homeDirectory: /home/uid391 + +dn: cn=user392,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user392 +sn: user392 +uid: uid392 +givenname: givenname392 +description: description392 +userPassword: password392 +mail: uid392 +uidnumber: 392 +gidnumber: 392 +homeDirectory: /home/uid392 + +dn: cn=user393,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user393 +sn: user393 +uid: uid393 +givenname: givenname393 +description: description393 +userPassword: password393 +mail: uid393 +uidnumber: 393 +gidnumber: 393 +homeDirectory: /home/uid393 + +dn: cn=user394,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user394 +sn: user394 +uid: uid394 +givenname: givenname394 +description: description394 +userPassword: password394 +mail: uid394 +uidnumber: 394 +gidnumber: 394 +homeDirectory: /home/uid394 + +dn: cn=user395,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user395 +sn: user395 +uid: uid395 +givenname: givenname395 +description: description395 +userPassword: password395 +mail: uid395 +uidnumber: 395 +gidnumber: 395 +homeDirectory: /home/uid395 + +dn: cn=user396,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user396 +sn: user396 +uid: uid396 +givenname: givenname396 +description: description396 +userPassword: password396 +mail: uid396 +uidnumber: 396 +gidnumber: 396 +homeDirectory: /home/uid396 + +dn: cn=user397,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user397 +sn: user397 +uid: uid397 +givenname: givenname397 +description: description397 +userPassword: password397 +mail: uid397 +uidnumber: 397 +gidnumber: 397 +homeDirectory: /home/uid397 + +dn: cn=user398,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user398 +sn: user398 +uid: uid398 +givenname: givenname398 +description: description398 +userPassword: password398 +mail: uid398 +uidnumber: 398 +gidnumber: 398 +homeDirectory: /home/uid398 + +dn: cn=user399,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user399 +sn: user399 +uid: uid399 +givenname: givenname399 +description: description399 +userPassword: password399 +mail: uid399 +uidnumber: 399 +gidnumber: 399 +homeDirectory: /home/uid399 + +dn: cn=user400,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user400 +sn: user400 +uid: uid400 +givenname: givenname400 +description: description400 +userPassword: password400 +mail: uid400 +uidnumber: 400 +gidnumber: 400 +homeDirectory: /home/uid400 + +dn: cn=user401,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user401 +sn: user401 +uid: uid401 +givenname: givenname401 +description: description401 +userPassword: password401 +mail: uid401 +uidnumber: 401 +gidnumber: 401 +homeDirectory: /home/uid401 + +dn: cn=user402,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user402 +sn: user402 +uid: uid402 +givenname: givenname402 +description: description402 +userPassword: password402 +mail: uid402 +uidnumber: 402 +gidnumber: 402 +homeDirectory: /home/uid402 + +dn: cn=user403,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user403 +sn: user403 +uid: uid403 +givenname: givenname403 +description: description403 +userPassword: password403 +mail: uid403 +uidnumber: 403 +gidnumber: 403 +homeDirectory: /home/uid403 + +dn: cn=user404,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user404 +sn: user404 +uid: uid404 +givenname: givenname404 +description: description404 +userPassword: password404 +mail: uid404 +uidnumber: 404 +gidnumber: 404 +homeDirectory: /home/uid404 + +dn: cn=user405,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user405 +sn: user405 +uid: uid405 +givenname: givenname405 +description: description405 +userPassword: password405 +mail: uid405 +uidnumber: 405 +gidnumber: 405 +homeDirectory: /home/uid405 + 
+dn: cn=user406,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user406 +sn: user406 +uid: uid406 +givenname: givenname406 +description: description406 +userPassword: password406 +mail: uid406 +uidnumber: 406 +gidnumber: 406 +homeDirectory: /home/uid406 + +dn: cn=user407,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user407 +sn: user407 +uid: uid407 +givenname: givenname407 +description: description407 +userPassword: password407 +mail: uid407 +uidnumber: 407 +gidnumber: 407 +homeDirectory: /home/uid407 + +dn: cn=user408,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user408 +sn: user408 +uid: uid408 +givenname: givenname408 +description: description408 +userPassword: password408 +mail: uid408 +uidnumber: 408 +gidnumber: 408 +homeDirectory: /home/uid408 + +dn: cn=user409,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user409 +sn: user409 +uid: uid409 +givenname: givenname409 +description: description409 +userPassword: password409 +mail: uid409 +uidnumber: 409 +gidnumber: 409 +homeDirectory: /home/uid409 + +dn: cn=user410,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user410 +sn: user410 +uid: uid410 +givenname: givenname410 +description: description410 +userPassword: password410 +mail: uid410 +uidnumber: 410 +gidnumber: 410 +homeDirectory: /home/uid410 + +dn: cn=user411,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user411 +sn: user411 +uid: uid411 +givenname: givenname411 +description: description411 +userPassword: password411 +mail: uid411 +uidnumber: 411 +gidnumber: 411 +homeDirectory: /home/uid411 + +dn: cn=user412,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user412 +sn: user412 +uid: uid412 +givenname: givenname412 +description: description412 +userPassword: password412 +mail: uid412 +uidnumber: 412 +gidnumber: 412 +homeDirectory: /home/uid412 + +dn: cn=user413,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user413 +sn: user413 +uid: uid413 +givenname: givenname413 +description: description413 +userPassword: password413 +mail: uid413 +uidnumber: 413 +gidnumber: 413 +homeDirectory: /home/uid413 + +dn: cn=user414,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user414 +sn: user414 +uid: uid414 +givenname: givenname414 +description: description414 +userPassword: password414 +mail: uid414 +uidnumber: 414 +gidnumber: 414 +homeDirectory: /home/uid414 + +dn: cn=user415,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user415 +sn: user415 +uid: uid415 +givenname: 
givenname415 +description: description415 +userPassword: password415 +mail: uid415 +uidnumber: 415 +gidnumber: 415 +homeDirectory: /home/uid415 + +dn: cn=user416,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user416 +sn: user416 +uid: uid416 +givenname: givenname416 +description: description416 +userPassword: password416 +mail: uid416 +uidnumber: 416 +gidnumber: 416 +homeDirectory: /home/uid416 + +dn: cn=user417,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user417 +sn: user417 +uid: uid417 +givenname: givenname417 +description: description417 +userPassword: password417 +mail: uid417 +uidnumber: 417 +gidnumber: 417 +homeDirectory: /home/uid417 + +dn: cn=user418,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user418 +sn: user418 +uid: uid418 +givenname: givenname418 +description: description418 +userPassword: password418 +mail: uid418 +uidnumber: 418 +gidnumber: 418 +homeDirectory: /home/uid418 + +dn: cn=user419,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user419 +sn: user419 +uid: uid419 +givenname: givenname419 +description: description419 +userPassword: password419 +mail: uid419 +uidnumber: 419 +gidnumber: 419 +homeDirectory: /home/uid419 + +dn: cn=user420,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user420 +sn: user420 +uid: uid420 +givenname: givenname420 +description: description420 +userPassword: password420 +mail: uid420 +uidnumber: 420 +gidnumber: 420 +homeDirectory: /home/uid420 + +dn: cn=user421,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user421 +sn: user421 +uid: uid421 +givenname: givenname421 +description: description421 +userPassword: password421 +mail: uid421 +uidnumber: 421 +gidnumber: 421 +homeDirectory: /home/uid421 + +dn: cn=user422,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user422 +sn: user422 +uid: uid422 +givenname: givenname422 +description: description422 +userPassword: password422 +mail: uid422 +uidnumber: 422 +gidnumber: 422 +homeDirectory: /home/uid422 + +dn: cn=user423,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user423 +sn: user423 +uid: uid423 +givenname: givenname423 +description: description423 +userPassword: password423 +mail: uid423 +uidnumber: 423 +gidnumber: 423 +homeDirectory: /home/uid423 + +dn: cn=user424,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user424 +sn: user424 +uid: uid424 +givenname: givenname424 +description: description424 +userPassword: password424 +mail: uid424 +uidnumber: 424 +gidnumber: 424 +homeDirectory: /home/uid424 + +dn: cn=user425,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user425 +sn: user425 +uid: uid425 +givenname: givenname425 +description: description425 +userPassword: password425 +mail: uid425 +uidnumber: 425 +gidnumber: 425 +homeDirectory: /home/uid425 + +dn: cn=user426,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user426 +sn: user426 +uid: uid426 +givenname: givenname426 +description: description426 +userPassword: password426 +mail: uid426 +uidnumber: 426 +gidnumber: 426 +homeDirectory: /home/uid426 + +dn: cn=user427,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user427 +sn: user427 +uid: uid427 +givenname: givenname427 +description: description427 +userPassword: password427 +mail: uid427 +uidnumber: 427 +gidnumber: 427 +homeDirectory: /home/uid427 + +dn: cn=user428,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user428 +sn: user428 +uid: uid428 +givenname: givenname428 +description: description428 +userPassword: password428 +mail: uid428 +uidnumber: 428 +gidnumber: 428 +homeDirectory: /home/uid428 + +dn: cn=user429,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user429 +sn: user429 +uid: uid429 +givenname: givenname429 +description: description429 +userPassword: password429 +mail: uid429 +uidnumber: 429 +gidnumber: 429 +homeDirectory: /home/uid429 + +dn: cn=user430,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user430 +sn: user430 +uid: uid430 +givenname: givenname430 +description: description430 +userPassword: password430 +mail: uid430 +uidnumber: 430 +gidnumber: 430 +homeDirectory: /home/uid430 + +dn: cn=user431,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user431 +sn: user431 +uid: uid431 +givenname: givenname431 +description: description431 +userPassword: password431 +mail: uid431 +uidnumber: 431 +gidnumber: 431 +homeDirectory: /home/uid431 + +dn: cn=user432,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user432 +sn: user432 +uid: uid432 +givenname: givenname432 +description: description432 +userPassword: password432 +mail: uid432 +uidnumber: 432 +gidnumber: 432 +homeDirectory: /home/uid432 + +dn: cn=user433,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user433 +sn: user433 +uid: uid433 +givenname: givenname433 +description: description433 +userPassword: password433 +mail: uid433 +uidnumber: 433 +gidnumber: 433 +homeDirectory: /home/uid433 + +dn: cn=user434,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user434 +sn: user434 +uid: uid434 +givenname: givenname434 +description: description434 +userPassword: password434 +mail: 
uid434 +uidnumber: 434 +gidnumber: 434 +homeDirectory: /home/uid434 + +dn: cn=user435,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user435 +sn: user435 +uid: uid435 +givenname: givenname435 +description: description435 +userPassword: password435 +mail: uid435 +uidnumber: 435 +gidnumber: 435 +homeDirectory: /home/uid435 + +dn: cn=user436,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user436 +sn: user436 +uid: uid436 +givenname: givenname436 +description: description436 +userPassword: password436 +mail: uid436 +uidnumber: 436 +gidnumber: 436 +homeDirectory: /home/uid436 + +dn: cn=user437,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user437 +sn: user437 +uid: uid437 +givenname: givenname437 +description: description437 +userPassword: password437 +mail: uid437 +uidnumber: 437 +gidnumber: 437 +homeDirectory: /home/uid437 + +dn: cn=user438,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user438 +sn: user438 +uid: uid438 +givenname: givenname438 +description: description438 +userPassword: password438 +mail: uid438 +uidnumber: 438 +gidnumber: 438 +homeDirectory: /home/uid438 + +dn: cn=user439,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user439 +sn: user439 +uid: uid439 +givenname: givenname439 +description: description439 +userPassword: password439 +mail: uid439 +uidnumber: 439 +gidnumber: 439 +homeDirectory: /home/uid439 + +dn: cn=user440,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user440 +sn: user440 +uid: uid440 +givenname: givenname440 +description: description440 +userPassword: password440 +mail: uid440 +uidnumber: 440 +gidnumber: 440 +homeDirectory: /home/uid440 + +dn: cn=user441,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user441 +sn: user441 +uid: uid441 +givenname: givenname441 +description: description441 +userPassword: password441 +mail: uid441 +uidnumber: 441 +gidnumber: 441 +homeDirectory: /home/uid441 + +dn: cn=user442,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user442 +sn: user442 +uid: uid442 +givenname: givenname442 +description: description442 +userPassword: password442 +mail: uid442 +uidnumber: 442 +gidnumber: 442 +homeDirectory: /home/uid442 + +dn: cn=user443,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user443 +sn: user443 +uid: uid443 +givenname: givenname443 +description: description443 +userPassword: password443 +mail: uid443 +uidnumber: 443 +gidnumber: 443 +homeDirectory: /home/uid443 + +dn: cn=user444,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user444 +sn: user444 +uid: uid444 +givenname: givenname444 +description: description444 +userPassword: password444 +mail: uid444 +uidnumber: 444 +gidnumber: 444 +homeDirectory: /home/uid444 + +dn: cn=user445,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user445 +sn: user445 +uid: uid445 +givenname: givenname445 +description: description445 +userPassword: password445 +mail: uid445 +uidnumber: 445 +gidnumber: 445 +homeDirectory: /home/uid445 + +dn: cn=user446,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user446 +sn: user446 +uid: uid446 +givenname: givenname446 +description: description446 +userPassword: password446 +mail: uid446 +uidnumber: 446 +gidnumber: 446 +homeDirectory: /home/uid446 + +dn: cn=user447,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user447 +sn: user447 +uid: uid447 +givenname: givenname447 +description: description447 +userPassword: password447 +mail: uid447 +uidnumber: 447 +gidnumber: 447 +homeDirectory: /home/uid447 + +dn: cn=user448,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user448 +sn: user448 +uid: uid448 +givenname: givenname448 +description: description448 +userPassword: password448 +mail: uid448 +uidnumber: 448 +gidnumber: 448 +homeDirectory: /home/uid448 + +dn: cn=user449,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user449 +sn: user449 +uid: uid449 +givenname: givenname449 +description: description449 +userPassword: password449 +mail: uid449 +uidnumber: 449 +gidnumber: 449 +homeDirectory: /home/uid449 + +dn: cn=user450,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user450 +sn: user450 +uid: uid450 +givenname: givenname450 +description: description450 +userPassword: password450 +mail: uid450 +uidnumber: 450 +gidnumber: 450 +homeDirectory: /home/uid450 + +dn: cn=user451,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user451 +sn: user451 +uid: uid451 +givenname: givenname451 +description: description451 +userPassword: password451 +mail: uid451 +uidnumber: 451 +gidnumber: 451 +homeDirectory: /home/uid451 + +dn: cn=user452,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user452 +sn: user452 +uid: uid452 +givenname: givenname452 +description: description452 +userPassword: password452 +mail: uid452 +uidnumber: 452 +gidnumber: 452 +homeDirectory: /home/uid452 + +dn: cn=user453,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user453 +sn: user453 +uid: uid453 +givenname: givenname453 +description: description453 +userPassword: password453 +mail: uid453 +uidnumber: 453 +gidnumber: 453 +homeDirectory: /home/uid453 + 
+dn: cn=user454,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user454 +sn: user454 +uid: uid454 +givenname: givenname454 +description: description454 +userPassword: password454 +mail: uid454 +uidnumber: 454 +gidnumber: 454 +homeDirectory: /home/uid454 + +dn: cn=user455,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user455 +sn: user455 +uid: uid455 +givenname: givenname455 +description: description455 +userPassword: password455 +mail: uid455 +uidnumber: 455 +gidnumber: 455 +homeDirectory: /home/uid455 + +dn: cn=user456,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user456 +sn: user456 +uid: uid456 +givenname: givenname456 +description: description456 +userPassword: password456 +mail: uid456 +uidnumber: 456 +gidnumber: 456 +homeDirectory: /home/uid456 + +dn: cn=user457,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user457 +sn: user457 +uid: uid457 +givenname: givenname457 +description: description457 +userPassword: password457 +mail: uid457 +uidnumber: 457 +gidnumber: 457 +homeDirectory: /home/uid457 + +dn: cn=user458,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user458 +sn: user458 +uid: uid458 +givenname: givenname458 +description: description458 +userPassword: password458 +mail: uid458 +uidnumber: 458 +gidnumber: 458 +homeDirectory: /home/uid458 + +dn: cn=user459,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user459 +sn: user459 +uid: uid459 +givenname: givenname459 +description: description459 +userPassword: password459 +mail: uid459 +uidnumber: 459 +gidnumber: 459 +homeDirectory: /home/uid459 + +dn: cn=user460,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user460 +sn: user460 +uid: uid460 +givenname: givenname460 +description: description460 +userPassword: password460 +mail: uid460 +uidnumber: 460 +gidnumber: 460 +homeDirectory: /home/uid460 + +dn: cn=user461,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user461 +sn: user461 +uid: uid461 +givenname: givenname461 +description: description461 +userPassword: password461 +mail: uid461 +uidnumber: 461 +gidnumber: 461 +homeDirectory: /home/uid461 + +dn: cn=user462,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user462 +sn: user462 +uid: uid462 +givenname: givenname462 +description: description462 +userPassword: password462 +mail: uid462 +uidnumber: 462 +gidnumber: 462 +homeDirectory: /home/uid462 + +dn: cn=user463,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user463 +sn: user463 +uid: uid463 +givenname: 
givenname463 +description: description463 +userPassword: password463 +mail: uid463 +uidnumber: 463 +gidnumber: 463 +homeDirectory: /home/uid463 + +dn: cn=user464,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user464 +sn: user464 +uid: uid464 +givenname: givenname464 +description: description464 +userPassword: password464 +mail: uid464 +uidnumber: 464 +gidnumber: 464 +homeDirectory: /home/uid464 + +dn: cn=user465,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user465 +sn: user465 +uid: uid465 +givenname: givenname465 +description: description465 +userPassword: password465 +mail: uid465 +uidnumber: 465 +gidnumber: 465 +homeDirectory: /home/uid465 + +dn: cn=user466,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user466 +sn: user466 +uid: uid466 +givenname: givenname466 +description: description466 +userPassword: password466 +mail: uid466 +uidnumber: 466 +gidnumber: 466 +homeDirectory: /home/uid466 + +dn: cn=user467,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user467 +sn: user467 +uid: uid467 +givenname: givenname467 +description: description467 +userPassword: password467 +mail: uid467 +uidnumber: 467 +gidnumber: 467 +homeDirectory: /home/uid467 + +dn: cn=user468,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user468 +sn: user468 +uid: uid468 +givenname: givenname468 +description: description468 +userPassword: password468 +mail: uid468 +uidnumber: 468 +gidnumber: 468 +homeDirectory: /home/uid468 + +dn: cn=user469,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user469 +sn: user469 +uid: uid469 +givenname: givenname469 +description: description469 +userPassword: password469 +mail: uid469 +uidnumber: 469 +gidnumber: 469 +homeDirectory: /home/uid469 + +dn: cn=user470,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user470 +sn: user470 +uid: uid470 +givenname: givenname470 +description: description470 +userPassword: password470 +mail: uid470 +uidnumber: 470 +gidnumber: 470 +homeDirectory: /home/uid470 + +dn: cn=user471,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user471 +sn: user471 +uid: uid471 +givenname: givenname471 +description: description471 +userPassword: password471 +mail: uid471 +uidnumber: 471 +gidnumber: 471 +homeDirectory: /home/uid471 + +dn: cn=user472,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user472 +sn: user472 +uid: uid472 +givenname: givenname472 +description: description472 +userPassword: password472 +mail: uid472 +uidnumber: 472 +gidnumber: 472 +homeDirectory: /home/uid472 + +dn: cn=user473,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user473 +sn: user473 +uid: uid473 +givenname: givenname473 +description: description473 +userPassword: password473 +mail: uid473 +uidnumber: 473 +gidnumber: 473 +homeDirectory: /home/uid473 + +dn: cn=user474,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user474 +sn: user474 +uid: uid474 +givenname: givenname474 +description: description474 +userPassword: password474 +mail: uid474 +uidnumber: 474 +gidnumber: 474 +homeDirectory: /home/uid474 + +dn: cn=user475,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user475 +sn: user475 +uid: uid475 +givenname: givenname475 +description: description475 +userPassword: password475 +mail: uid475 +uidnumber: 475 +gidnumber: 475 +homeDirectory: /home/uid475 + +dn: cn=user476,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user476 +sn: user476 +uid: uid476 +givenname: givenname476 +description: description476 +userPassword: password476 +mail: uid476 +uidnumber: 476 +gidnumber: 476 +homeDirectory: /home/uid476 + +dn: cn=user477,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user477 +sn: user477 +uid: uid477 +givenname: givenname477 +description: description477 +userPassword: password477 +mail: uid477 +uidnumber: 477 +gidnumber: 477 +homeDirectory: /home/uid477 + +dn: cn=user478,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user478 +sn: user478 +uid: uid478 +givenname: givenname478 +description: description478 +userPassword: password478 +mail: uid478 +uidnumber: 478 +gidnumber: 478 +homeDirectory: /home/uid478 + +dn: cn=user479,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user479 +sn: user479 +uid: uid479 +givenname: givenname479 +description: description479 +userPassword: password479 +mail: uid479 +uidnumber: 479 +gidnumber: 479 +homeDirectory: /home/uid479 + +dn: cn=user480,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user480 +sn: user480 +uid: uid480 +givenname: givenname480 +description: description480 +userPassword: password480 +mail: uid480 +uidnumber: 480 +gidnumber: 480 +homeDirectory: /home/uid480 + +dn: cn=user481,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user481 +sn: user481 +uid: uid481 +givenname: givenname481 +description: description481 +userPassword: password481 +mail: uid481 +uidnumber: 481 +gidnumber: 481 +homeDirectory: /home/uid481 + +dn: cn=user482,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user482 +sn: user482 +uid: uid482 +givenname: givenname482 +description: description482 +userPassword: password482 +mail: 
uid482 +uidnumber: 482 +gidnumber: 482 +homeDirectory: /home/uid482 + +dn: cn=user483,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user483 +sn: user483 +uid: uid483 +givenname: givenname483 +description: description483 +userPassword: password483 +mail: uid483 +uidnumber: 483 +gidnumber: 483 +homeDirectory: /home/uid483 + +dn: cn=user484,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user484 +sn: user484 +uid: uid484 +givenname: givenname484 +description: description484 +userPassword: password484 +mail: uid484 +uidnumber: 484 +gidnumber: 484 +homeDirectory: /home/uid484 + +dn: cn=user485,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user485 +sn: user485 +uid: uid485 +givenname: givenname485 +description: description485 +userPassword: password485 +mail: uid485 +uidnumber: 485 +gidnumber: 485 +homeDirectory: /home/uid485 + +dn: cn=user486,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user486 +sn: user486 +uid: uid486 +givenname: givenname486 +description: description486 +userPassword: password486 +mail: uid486 +uidnumber: 486 +gidnumber: 486 +homeDirectory: /home/uid486 + +dn: cn=user487,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user487 +sn: user487 +uid: uid487 +givenname: givenname487 +description: description487 +userPassword: password487 +mail: uid487 +uidnumber: 487 +gidnumber: 487 +homeDirectory: /home/uid487 + +dn: cn=user488,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user488 +sn: user488 +uid: uid488 +givenname: givenname488 +description: description488 +userPassword: password488 +mail: uid488 +uidnumber: 488 +gidnumber: 488 +homeDirectory: /home/uid488 + +dn: cn=user489,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user489 +sn: user489 +uid: uid489 +givenname: givenname489 +description: description489 +userPassword: password489 +mail: uid489 +uidnumber: 489 +gidnumber: 489 +homeDirectory: /home/uid489 + +dn: cn=user490,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user490 +sn: user490 +uid: uid490 +givenname: givenname490 +description: description490 +userPassword: password490 +mail: uid490 +uidnumber: 490 +gidnumber: 490 +homeDirectory: /home/uid490 + +dn: cn=user491,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user491 +sn: user491 +uid: uid491 +givenname: givenname491 +description: description491 +userPassword: password491 +mail: uid491 +uidnumber: 491 +gidnumber: 491 +homeDirectory: /home/uid491 + +dn: cn=user492,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user492 +sn: user492 +uid: uid492 +givenname: givenname492 +description: description492 +userPassword: password492 +mail: uid492 +uidnumber: 492 +gidnumber: 492 +homeDirectory: /home/uid492 + +dn: cn=user493,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user493 +sn: user493 +uid: uid493 +givenname: givenname493 +description: description493 +userPassword: password493 +mail: uid493 +uidnumber: 493 +gidnumber: 493 +homeDirectory: /home/uid493 + +dn: cn=user494,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user494 +sn: user494 +uid: uid494 +givenname: givenname494 +description: description494 +userPassword: password494 +mail: uid494 +uidnumber: 494 +gidnumber: 494 +homeDirectory: /home/uid494 + +dn: cn=user495,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user495 +sn: user495 +uid: uid495 +givenname: givenname495 +description: description495 +userPassword: password495 +mail: uid495 +uidnumber: 495 +gidnumber: 495 +homeDirectory: /home/uid495 + +dn: cn=user496,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user496 +sn: user496 +uid: uid496 +givenname: givenname496 +description: description496 +userPassword: password496 +mail: uid496 +uidnumber: 496 +gidnumber: 496 +homeDirectory: /home/uid496 + +dn: cn=user497,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user497 +sn: user497 +uid: uid497 +givenname: givenname497 +description: description497 +userPassword: password497 +mail: uid497 +uidnumber: 497 +gidnumber: 497 +homeDirectory: /home/uid497 + +dn: cn=user498,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user498 +sn: user498 +uid: uid498 +givenname: givenname498 +description: description498 +userPassword: password498 +mail: uid498 +uidnumber: 498 +gidnumber: 498 +homeDirectory: /home/uid498 + +dn: cn=user499,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user499 +sn: user499 +uid: uid499 +givenname: givenname499 +description: description499 +userPassword: password499 +mail: uid499 +uidnumber: 499 +gidnumber: 499 +homeDirectory: /home/uid499 + +dn: cn=user500,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user500 +sn: user500 +uid: uid500 +givenname: givenname500 +description: description500 +userPassword: password500 +mail: uid500 +uidnumber: 500 +gidnumber: 500 +homeDirectory: /home/uid500 + +dn: cn=user501,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user501 +sn: user501 +uid: uid501 +givenname: givenname501 +description: description501 +userPassword: password501 +mail: uid501 +uidnumber: 501 +gidnumber: 501 +homeDirectory: /home/uid501 + 
+dn: cn=user502,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user502 +sn: user502 +uid: uid502 +givenname: givenname502 +description: description502 +userPassword: password502 +mail: uid502 +uidnumber: 502 +gidnumber: 502 +homeDirectory: /home/uid502 + +dn: cn=user503,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user503 +sn: user503 +uid: uid503 +givenname: givenname503 +description: description503 +userPassword: password503 +mail: uid503 +uidnumber: 503 +gidnumber: 503 +homeDirectory: /home/uid503 + +dn: cn=user504,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user504 +sn: user504 +uid: uid504 +givenname: givenname504 +description: description504 +userPassword: password504 +mail: uid504 +uidnumber: 504 +gidnumber: 504 +homeDirectory: /home/uid504 + +dn: cn=user505,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user505 +sn: user505 +uid: uid505 +givenname: givenname505 +description: description505 +userPassword: password505 +mail: uid505 +uidnumber: 505 +gidnumber: 505 +homeDirectory: /home/uid505 + +dn: cn=user506,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user506 +sn: user506 +uid: uid506 +givenname: givenname506 +description: description506 +userPassword: password506 +mail: uid506 +uidnumber: 506 +gidnumber: 506 +homeDirectory: /home/uid506 + +dn: cn=user507,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user507 +sn: user507 +uid: uid507 +givenname: givenname507 +description: description507 +userPassword: password507 +mail: uid507 +uidnumber: 507 +gidnumber: 507 +homeDirectory: /home/uid507 + +dn: cn=user508,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user508 +sn: user508 +uid: uid508 +givenname: givenname508 +description: description508 +userPassword: password508 +mail: uid508 +uidnumber: 508 +gidnumber: 508 +homeDirectory: /home/uid508 + +dn: cn=user509,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user509 +sn: user509 +uid: uid509 +givenname: givenname509 +description: description509 +userPassword: password509 +mail: uid509 +uidnumber: 509 +gidnumber: 509 +homeDirectory: /home/uid509 + +dn: cn=user510,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user510 +sn: user510 +uid: uid510 +givenname: givenname510 +description: description510 +userPassword: password510 +mail: uid510 +uidnumber: 510 +gidnumber: 510 +homeDirectory: /home/uid510 + +dn: cn=user511,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user511 +sn: user511 +uid: uid511 +givenname: 
givenname511 +description: description511 +userPassword: password511 +mail: uid511 +uidnumber: 511 +gidnumber: 511 +homeDirectory: /home/uid511 + +dn: cn=user512,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user512 +sn: user512 +uid: uid512 +givenname: givenname512 +description: description512 +userPassword: password512 +mail: uid512 +uidnumber: 512 +gidnumber: 512 +homeDirectory: /home/uid512 + +dn: cn=user513,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user513 +sn: user513 +uid: uid513 +givenname: givenname513 +description: description513 +userPassword: password513 +mail: uid513 +uidnumber: 513 +gidnumber: 513 +homeDirectory: /home/uid513 + +dn: cn=user514,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user514 +sn: user514 +uid: uid514 +givenname: givenname514 +description: description514 +userPassword: password514 +mail: uid514 +uidnumber: 514 +gidnumber: 514 +homeDirectory: /home/uid514 + +dn: cn=user515,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user515 +sn: user515 +uid: uid515 +givenname: givenname515 +description: description515 +userPassword: password515 +mail: uid515 +uidnumber: 515 +gidnumber: 515 +homeDirectory: /home/uid515 + +dn: cn=user516,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user516 +sn: user516 +uid: uid516 +givenname: givenname516 +description: description516 +userPassword: password516 +mail: uid516 +uidnumber: 516 +gidnumber: 516 +homeDirectory: /home/uid516 + +dn: cn=user517,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user517 +sn: user517 +uid: uid517 +givenname: givenname517 +description: description517 +userPassword: password517 +mail: uid517 +uidnumber: 517 +gidnumber: 517 +homeDirectory: /home/uid517 + +dn: cn=user518,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user518 +sn: user518 +uid: uid518 +givenname: givenname518 +description: description518 +userPassword: password518 +mail: uid518 +uidnumber: 518 +gidnumber: 518 +homeDirectory: /home/uid518 + +dn: cn=user519,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user519 +sn: user519 +uid: uid519 +givenname: givenname519 +description: description519 +userPassword: password519 +mail: uid519 +uidnumber: 519 +gidnumber: 519 +homeDirectory: /home/uid519 + +dn: cn=user520,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user520 +sn: user520 +uid: uid520 +givenname: givenname520 +description: description520 +userPassword: password520 +mail: uid520 +uidnumber: 520 +gidnumber: 520 +homeDirectory: /home/uid520 + +dn: cn=user521,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user521 +sn: user521 +uid: uid521 +givenname: givenname521 +description: description521 +userPassword: password521 +mail: uid521 +uidnumber: 521 +gidnumber: 521 +homeDirectory: /home/uid521 + +dn: cn=user522,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user522 +sn: user522 +uid: uid522 +givenname: givenname522 +description: description522 +userPassword: password522 +mail: uid522 +uidnumber: 522 +gidnumber: 522 +homeDirectory: /home/uid522 + +dn: cn=user523,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user523 +sn: user523 +uid: uid523 +givenname: givenname523 +description: description523 +userPassword: password523 +mail: uid523 +uidnumber: 523 +gidnumber: 523 +homeDirectory: /home/uid523 + +dn: cn=user524,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user524 +sn: user524 +uid: uid524 +givenname: givenname524 +description: description524 +userPassword: password524 +mail: uid524 +uidnumber: 524 +gidnumber: 524 +homeDirectory: /home/uid524 + +dn: cn=user525,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user525 +sn: user525 +uid: uid525 +givenname: givenname525 +description: description525 +userPassword: password525 +mail: uid525 +uidnumber: 525 +gidnumber: 525 +homeDirectory: /home/uid525 + +dn: cn=user526,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user526 +sn: user526 +uid: uid526 +givenname: givenname526 +description: description526 +userPassword: password526 +mail: uid526 +uidnumber: 526 +gidnumber: 526 +homeDirectory: /home/uid526 + +dn: cn=user527,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user527 +sn: user527 +uid: uid527 +givenname: givenname527 +description: description527 +userPassword: password527 +mail: uid527 +uidnumber: 527 +gidnumber: 527 +homeDirectory: /home/uid527 + +dn: cn=user528,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user528 +sn: user528 +uid: uid528 +givenname: givenname528 +description: description528 +userPassword: password528 +mail: uid528 +uidnumber: 528 +gidnumber: 528 +homeDirectory: /home/uid528 + +dn: cn=user529,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user529 +sn: user529 +uid: uid529 +givenname: givenname529 +description: description529 +userPassword: password529 +mail: uid529 +uidnumber: 529 +gidnumber: 529 +homeDirectory: /home/uid529 + +dn: cn=user530,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user530 +sn: user530 +uid: uid530 +givenname: givenname530 +description: description530 +userPassword: password530 +mail: 
uid530 +uidnumber: 530 +gidnumber: 530 +homeDirectory: /home/uid530 + +dn: cn=user531,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user531 +sn: user531 +uid: uid531 +givenname: givenname531 +description: description531 +userPassword: password531 +mail: uid531 +uidnumber: 531 +gidnumber: 531 +homeDirectory: /home/uid531 + +dn: cn=user532,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user532 +sn: user532 +uid: uid532 +givenname: givenname532 +description: description532 +userPassword: password532 +mail: uid532 +uidnumber: 532 +gidnumber: 532 +homeDirectory: /home/uid532 + +dn: cn=user533,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user533 +sn: user533 +uid: uid533 +givenname: givenname533 +description: description533 +userPassword: password533 +mail: uid533 +uidnumber: 533 +gidnumber: 533 +homeDirectory: /home/uid533 + +dn: cn=user534,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user534 +sn: user534 +uid: uid534 +givenname: givenname534 +description: description534 +userPassword: password534 +mail: uid534 +uidnumber: 534 +gidnumber: 534 +homeDirectory: /home/uid534 + +dn: cn=user535,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user535 +sn: user535 +uid: uid535 +givenname: givenname535 +description: description535 +userPassword: password535 +mail: uid535 +uidnumber: 535 +gidnumber: 535 +homeDirectory: /home/uid535 + +dn: cn=user536,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user536 +sn: user536 +uid: uid536 +givenname: givenname536 +description: description536 +userPassword: password536 +mail: uid536 +uidnumber: 536 +gidnumber: 536 +homeDirectory: /home/uid536 + +dn: cn=user537,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user537 +sn: user537 +uid: uid537 +givenname: givenname537 +description: description537 +userPassword: password537 +mail: uid537 +uidnumber: 537 +gidnumber: 537 +homeDirectory: /home/uid537 + +dn: cn=user538,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user538 +sn: user538 +uid: uid538 +givenname: givenname538 +description: description538 +userPassword: password538 +mail: uid538 +uidnumber: 538 +gidnumber: 538 +homeDirectory: /home/uid538 + +dn: cn=user539,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user539 +sn: user539 +uid: uid539 +givenname: givenname539 +description: description539 +userPassword: password539 +mail: uid539 +uidnumber: 539 +gidnumber: 539 +homeDirectory: /home/uid539 + +dn: cn=user540,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user540 +sn: user540 +uid: uid540 +givenname: givenname540 +description: description540 +userPassword: password540 +mail: uid540 +uidnumber: 540 +gidnumber: 540 +homeDirectory: /home/uid540 + +dn: cn=user541,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user541 +sn: user541 +uid: uid541 +givenname: givenname541 +description: description541 +userPassword: password541 +mail: uid541 +uidnumber: 541 +gidnumber: 541 +homeDirectory: /home/uid541 + +dn: cn=user542,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user542 +sn: user542 +uid: uid542 +givenname: givenname542 +description: description542 +userPassword: password542 +mail: uid542 +uidnumber: 542 +gidnumber: 542 +homeDirectory: /home/uid542 + +dn: cn=user543,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user543 +sn: user543 +uid: uid543 +givenname: givenname543 +description: description543 +userPassword: password543 +mail: uid543 +uidnumber: 543 +gidnumber: 543 +homeDirectory: /home/uid543 + +dn: cn=user544,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user544 +sn: user544 +uid: uid544 +givenname: givenname544 +description: description544 +userPassword: password544 +mail: uid544 +uidnumber: 544 +gidnumber: 544 +homeDirectory: /home/uid544 + +dn: cn=user545,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user545 +sn: user545 +uid: uid545 +givenname: givenname545 +description: description545 +userPassword: password545 +mail: uid545 +uidnumber: 545 +gidnumber: 545 +homeDirectory: /home/uid545 + +dn: cn=user546,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user546 +sn: user546 +uid: uid546 +givenname: givenname546 +description: description546 +userPassword: password546 +mail: uid546 +uidnumber: 546 +gidnumber: 546 +homeDirectory: /home/uid546 + +dn: cn=user547,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user547 +sn: user547 +uid: uid547 +givenname: givenname547 +description: description547 +userPassword: password547 +mail: uid547 +uidnumber: 547 +gidnumber: 547 +homeDirectory: /home/uid547 + +dn: cn=user548,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user548 +sn: user548 +uid: uid548 +givenname: givenname548 +description: description548 +userPassword: password548 +mail: uid548 +uidnumber: 548 +gidnumber: 548 +homeDirectory: /home/uid548 + +dn: cn=user549,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user549 +sn: user549 +uid: uid549 +givenname: givenname549 +description: description549 +userPassword: password549 +mail: uid549 +uidnumber: 549 +gidnumber: 549 +homeDirectory: /home/uid549 + 
+dn: cn=user550,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user550 +sn: user550 +uid: uid550 +givenname: givenname550 +description: description550 +userPassword: password550 +mail: uid550 +uidnumber: 550 +gidnumber: 550 +homeDirectory: /home/uid550 + +dn: cn=user551,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user551 +sn: user551 +uid: uid551 +givenname: givenname551 +description: description551 +userPassword: password551 +mail: uid551 +uidnumber: 551 +gidnumber: 551 +homeDirectory: /home/uid551 + +dn: cn=user552,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user552 +sn: user552 +uid: uid552 +givenname: givenname552 +description: description552 +userPassword: password552 +mail: uid552 +uidnumber: 552 +gidnumber: 552 +homeDirectory: /home/uid552 + +dn: cn=user553,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user553 +sn: user553 +uid: uid553 +givenname: givenname553 +description: description553 +userPassword: password553 +mail: uid553 +uidnumber: 553 +gidnumber: 553 +homeDirectory: /home/uid553 + +dn: cn=user554,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user554 +sn: user554 +uid: uid554 +givenname: givenname554 +description: description554 +userPassword: password554 +mail: uid554 +uidnumber: 554 +gidnumber: 554 +homeDirectory: /home/uid554 + +dn: cn=user555,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user555 +sn: user555 +uid: uid555 +givenname: givenname555 +description: description555 +userPassword: password555 +mail: uid555 +uidnumber: 555 +gidnumber: 555 +homeDirectory: /home/uid555 + +dn: cn=user556,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user556 +sn: user556 +uid: uid556 +givenname: givenname556 +description: description556 +userPassword: password556 +mail: uid556 +uidnumber: 556 +gidnumber: 556 +homeDirectory: /home/uid556 + +dn: cn=user557,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user557 +sn: user557 +uid: uid557 +givenname: givenname557 +description: description557 +userPassword: password557 +mail: uid557 +uidnumber: 557 +gidnumber: 557 +homeDirectory: /home/uid557 + +dn: cn=user558,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user558 +sn: user558 +uid: uid558 +givenname: givenname558 +description: description558 +userPassword: password558 +mail: uid558 +uidnumber: 558 +gidnumber: 558 +homeDirectory: /home/uid558 + +dn: cn=user559,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user559 +sn: user559 +uid: uid559 +givenname: 
givenname559 +description: description559 +userPassword: password559 +mail: uid559 +uidnumber: 559 +gidnumber: 559 +homeDirectory: /home/uid559 + +dn: cn=user560,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user560 +sn: user560 +uid: uid560 +givenname: givenname560 +description: description560 +userPassword: password560 +mail: uid560 +uidnumber: 560 +gidnumber: 560 +homeDirectory: /home/uid560 + +dn: cn=user561,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user561 +sn: user561 +uid: uid561 +givenname: givenname561 +description: description561 +userPassword: password561 +mail: uid561 +uidnumber: 561 +gidnumber: 561 +homeDirectory: /home/uid561 + +dn: cn=user562,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user562 +sn: user562 +uid: uid562 +givenname: givenname562 +description: description562 +userPassword: password562 +mail: uid562 +uidnumber: 562 +gidnumber: 562 +homeDirectory: /home/uid562 + +dn: cn=user563,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user563 +sn: user563 +uid: uid563 +givenname: givenname563 +description: description563 +userPassword: password563 +mail: uid563 +uidnumber: 563 +gidnumber: 563 +homeDirectory: /home/uid563 + +dn: cn=user564,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user564 +sn: user564 +uid: uid564 +givenname: givenname564 +description: description564 +userPassword: password564 +mail: uid564 +uidnumber: 564 +gidnumber: 564 +homeDirectory: /home/uid564 + +dn: cn=user565,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user565 +sn: user565 +uid: uid565 +givenname: givenname565 +description: description565 +userPassword: password565 +mail: uid565 +uidnumber: 565 +gidnumber: 565 +homeDirectory: /home/uid565 + +dn: cn=user566,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user566 +sn: user566 +uid: uid566 +givenname: givenname566 +description: description566 +userPassword: password566 +mail: uid566 +uidnumber: 566 +gidnumber: 566 +homeDirectory: /home/uid566 + +dn: cn=user567,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user567 +sn: user567 +uid: uid567 +givenname: givenname567 +description: description567 +userPassword: password567 +mail: uid567 +uidnumber: 567 +gidnumber: 567 +homeDirectory: /home/uid567 + +dn: cn=user568,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user568 +sn: user568 +uid: uid568 +givenname: givenname568 +description: description568 +userPassword: password568 +mail: uid568 +uidnumber: 568 +gidnumber: 568 +homeDirectory: /home/uid568 + +dn: cn=user569,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user569 +sn: user569 +uid: uid569 +givenname: givenname569 +description: description569 +userPassword: password569 +mail: uid569 +uidnumber: 569 +gidnumber: 569 +homeDirectory: /home/uid569 + +dn: cn=user570,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user570 +sn: user570 +uid: uid570 +givenname: givenname570 +description: description570 +userPassword: password570 +mail: uid570 +uidnumber: 570 +gidnumber: 570 +homeDirectory: /home/uid570 + +dn: cn=user571,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user571 +sn: user571 +uid: uid571 +givenname: givenname571 +description: description571 +userPassword: password571 +mail: uid571 +uidnumber: 571 +gidnumber: 571 +homeDirectory: /home/uid571 + +dn: cn=user572,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user572 +sn: user572 +uid: uid572 +givenname: givenname572 +description: description572 +userPassword: password572 +mail: uid572 +uidnumber: 572 +gidnumber: 572 +homeDirectory: /home/uid572 + +dn: cn=user573,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user573 +sn: user573 +uid: uid573 +givenname: givenname573 +description: description573 +userPassword: password573 +mail: uid573 +uidnumber: 573 +gidnumber: 573 +homeDirectory: /home/uid573 + +dn: cn=user574,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user574 +sn: user574 +uid: uid574 +givenname: givenname574 +description: description574 +userPassword: password574 +mail: uid574 +uidnumber: 574 +gidnumber: 574 +homeDirectory: /home/uid574 + +dn: cn=user575,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user575 +sn: user575 +uid: uid575 +givenname: givenname575 +description: description575 +userPassword: password575 +mail: uid575 +uidnumber: 575 +gidnumber: 575 +homeDirectory: /home/uid575 + +dn: cn=user576,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user576 +sn: user576 +uid: uid576 +givenname: givenname576 +description: description576 +userPassword: password576 +mail: uid576 +uidnumber: 576 +gidnumber: 576 +homeDirectory: /home/uid576 + +dn: cn=user577,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user577 +sn: user577 +uid: uid577 +givenname: givenname577 +description: description577 +userPassword: password577 +mail: uid577 +uidnumber: 577 +gidnumber: 577 +homeDirectory: /home/uid577 + +dn: cn=user578,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user578 +sn: user578 +uid: uid578 +givenname: givenname578 +description: description578 +userPassword: password578 +mail: 
uid578 +uidnumber: 578 +gidnumber: 578 +homeDirectory: /home/uid578 + +dn: cn=user579,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user579 +sn: user579 +uid: uid579 +givenname: givenname579 +description: description579 +userPassword: password579 +mail: uid579 +uidnumber: 579 +gidnumber: 579 +homeDirectory: /home/uid579 + +dn: cn=user580,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user580 +sn: user580 +uid: uid580 +givenname: givenname580 +description: description580 +userPassword: password580 +mail: uid580 +uidnumber: 580 +gidnumber: 580 +homeDirectory: /home/uid580 + +dn: cn=user581,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user581 +sn: user581 +uid: uid581 +givenname: givenname581 +description: description581 +userPassword: password581 +mail: uid581 +uidnumber: 581 +gidnumber: 581 +homeDirectory: /home/uid581 + +dn: cn=user582,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user582 +sn: user582 +uid: uid582 +givenname: givenname582 +description: description582 +userPassword: password582 +mail: uid582 +uidnumber: 582 +gidnumber: 582 +homeDirectory: /home/uid582 + +dn: cn=user583,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user583 +sn: user583 +uid: uid583 +givenname: givenname583 +description: description583 +userPassword: password583 +mail: uid583 +uidnumber: 583 +gidnumber: 583 +homeDirectory: /home/uid583 + +dn: cn=user584,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user584 +sn: user584 +uid: uid584 +givenname: givenname584 +description: description584 +userPassword: password584 +mail: uid584 +uidnumber: 584 +gidnumber: 584 +homeDirectory: /home/uid584 + +dn: cn=user585,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user585 +sn: user585 +uid: uid585 +givenname: givenname585 +description: description585 +userPassword: password585 +mail: uid585 +uidnumber: 585 +gidnumber: 585 +homeDirectory: /home/uid585 + +dn: cn=user586,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user586 +sn: user586 +uid: uid586 +givenname: givenname586 +description: description586 +userPassword: password586 +mail: uid586 +uidnumber: 586 +gidnumber: 586 +homeDirectory: /home/uid586 + +dn: cn=user587,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user587 +sn: user587 +uid: uid587 +givenname: givenname587 +description: description587 +userPassword: password587 +mail: uid587 +uidnumber: 587 +gidnumber: 587 +homeDirectory: /home/uid587 + +dn: cn=user588,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user588 +sn: user588 +uid: uid588 +givenname: givenname588 +description: description588 +userPassword: password588 +mail: uid588 +uidnumber: 588 +gidnumber: 588 +homeDirectory: /home/uid588 + +dn: cn=user589,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user589 +sn: user589 +uid: uid589 +givenname: givenname589 +description: description589 +userPassword: password589 +mail: uid589 +uidnumber: 589 +gidnumber: 589 +homeDirectory: /home/uid589 + +dn: cn=user590,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user590 +sn: user590 +uid: uid590 +givenname: givenname590 +description: description590 +userPassword: password590 +mail: uid590 +uidnumber: 590 +gidnumber: 590 +homeDirectory: /home/uid590 + +dn: cn=user591,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user591 +sn: user591 +uid: uid591 +givenname: givenname591 +description: description591 +userPassword: password591 +mail: uid591 +uidnumber: 591 +gidnumber: 591 +homeDirectory: /home/uid591 + +dn: cn=user592,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user592 +sn: user592 +uid: uid592 +givenname: givenname592 +description: description592 +userPassword: password592 +mail: uid592 +uidnumber: 592 +gidnumber: 592 +homeDirectory: /home/uid592 + +dn: cn=user593,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user593 +sn: user593 +uid: uid593 +givenname: givenname593 +description: description593 +userPassword: password593 +mail: uid593 +uidnumber: 593 +gidnumber: 593 +homeDirectory: /home/uid593 + +dn: cn=user594,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user594 +sn: user594 +uid: uid594 +givenname: givenname594 +description: description594 +userPassword: password594 +mail: uid594 +uidnumber: 594 +gidnumber: 594 +homeDirectory: /home/uid594 + +dn: cn=user595,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user595 +sn: user595 +uid: uid595 +givenname: givenname595 +description: description595 +userPassword: password595 +mail: uid595 +uidnumber: 595 +gidnumber: 595 +homeDirectory: /home/uid595 + +dn: cn=user596,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user596 +sn: user596 +uid: uid596 +givenname: givenname596 +description: description596 +userPassword: password596 +mail: uid596 +uidnumber: 596 +gidnumber: 596 +homeDirectory: /home/uid596 + +dn: cn=user597,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user597 +sn: user597 +uid: uid597 +givenname: givenname597 +description: description597 +userPassword: password597 +mail: uid597 +uidnumber: 597 +gidnumber: 597 +homeDirectory: /home/uid597 + 
+dn: cn=user598,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user598 +sn: user598 +uid: uid598 +givenname: givenname598 +description: description598 +userPassword: password598 +mail: uid598 +uidnumber: 598 +gidnumber: 598 +homeDirectory: /home/uid598 + +dn: cn=user599,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user599 +sn: user599 +uid: uid599 +givenname: givenname599 +description: description599 +userPassword: password599 +mail: uid599 +uidnumber: 599 +gidnumber: 599 +homeDirectory: /home/uid599 + +dn: cn=user600,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user600 +sn: user600 +uid: uid600 +givenname: givenname600 +description: description600 +userPassword: password600 +mail: uid600 +uidnumber: 600 +gidnumber: 600 +homeDirectory: /home/uid600 + +dn: cn=user601,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user601 +sn: user601 +uid: uid601 +givenname: givenname601 +description: description601 +userPassword: password601 +mail: uid601 +uidnumber: 601 +gidnumber: 601 +homeDirectory: /home/uid601 + +dn: cn=user602,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user602 +sn: user602 +uid: uid602 +givenname: givenname602 +description: description602 +userPassword: password602 +mail: uid602 +uidnumber: 602 +gidnumber: 602 +homeDirectory: /home/uid602 + +dn: cn=user603,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user603 +sn: user603 +uid: uid603 +givenname: givenname603 +description: description603 +userPassword: password603 +mail: uid603 +uidnumber: 603 +gidnumber: 603 +homeDirectory: /home/uid603 + +dn: cn=user604,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user604 +sn: user604 +uid: uid604 +givenname: givenname604 +description: description604 +userPassword: password604 +mail: uid604 +uidnumber: 604 +gidnumber: 604 +homeDirectory: /home/uid604 + +dn: cn=user605,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user605 +sn: user605 +uid: uid605 +givenname: givenname605 +description: description605 +userPassword: password605 +mail: uid605 +uidnumber: 605 +gidnumber: 605 +homeDirectory: /home/uid605 + +dn: cn=user606,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user606 +sn: user606 +uid: uid606 +givenname: givenname606 +description: description606 +userPassword: password606 +mail: uid606 +uidnumber: 606 +gidnumber: 606 +homeDirectory: /home/uid606 + +dn: cn=user607,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user607 +sn: user607 +uid: uid607 +givenname: 
givenname607 +description: description607 +userPassword: password607 +mail: uid607 +uidnumber: 607 +gidnumber: 607 +homeDirectory: /home/uid607 + +dn: cn=user608,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user608 +sn: user608 +uid: uid608 +givenname: givenname608 +description: description608 +userPassword: password608 +mail: uid608 +uidnumber: 608 +gidnumber: 608 +homeDirectory: /home/uid608 + +dn: cn=user609,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user609 +sn: user609 +uid: uid609 +givenname: givenname609 +description: description609 +userPassword: password609 +mail: uid609 +uidnumber: 609 +gidnumber: 609 +homeDirectory: /home/uid609 + +dn: cn=user610,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user610 +sn: user610 +uid: uid610 +givenname: givenname610 +description: description610 +userPassword: password610 +mail: uid610 +uidnumber: 610 +gidnumber: 610 +homeDirectory: /home/uid610 + +dn: cn=user611,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user611 +sn: user611 +uid: uid611 +givenname: givenname611 +description: description611 +userPassword: password611 +mail: uid611 +uidnumber: 611 +gidnumber: 611 +homeDirectory: /home/uid611 + +dn: cn=user612,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user612 +sn: user612 +uid: uid612 +givenname: givenname612 +description: description612 +userPassword: password612 +mail: uid612 +uidnumber: 612 +gidnumber: 612 +homeDirectory: /home/uid612 + +dn: cn=user613,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user613 +sn: user613 +uid: uid613 +givenname: givenname613 +description: description613 +userPassword: password613 +mail: uid613 +uidnumber: 613 +gidnumber: 613 +homeDirectory: /home/uid613 + +dn: cn=user614,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user614 +sn: user614 +uid: uid614 +givenname: givenname614 +description: description614 +userPassword: password614 +mail: uid614 +uidnumber: 614 +gidnumber: 614 +homeDirectory: /home/uid614 + +dn: cn=user615,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user615 +sn: user615 +uid: uid615 +givenname: givenname615 +description: description615 +userPassword: password615 +mail: uid615 +uidnumber: 615 +gidnumber: 615 +homeDirectory: /home/uid615 + +dn: cn=user616,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user616 +sn: user616 +uid: uid616 +givenname: givenname616 +description: description616 +userPassword: password616 +mail: uid616 +uidnumber: 616 +gidnumber: 616 +homeDirectory: /home/uid616 + +dn: cn=user617,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user617 +sn: user617 +uid: uid617 +givenname: givenname617 +description: description617 +userPassword: password617 +mail: uid617 +uidnumber: 617 +gidnumber: 617 +homeDirectory: /home/uid617 + +dn: cn=user618,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user618 +sn: user618 +uid: uid618 +givenname: givenname618 +description: description618 +userPassword: password618 +mail: uid618 +uidnumber: 618 +gidnumber: 618 +homeDirectory: /home/uid618 + +dn: cn=user619,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user619 +sn: user619 +uid: uid619 +givenname: givenname619 +description: description619 +userPassword: password619 +mail: uid619 +uidnumber: 619 +gidnumber: 619 +homeDirectory: /home/uid619 + +dn: cn=user620,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user620 +sn: user620 +uid: uid620 +givenname: givenname620 +description: description620 +userPassword: password620 +mail: uid620 +uidnumber: 620 +gidnumber: 620 +homeDirectory: /home/uid620 + +dn: cn=user621,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user621 +sn: user621 +uid: uid621 +givenname: givenname621 +description: description621 +userPassword: password621 +mail: uid621 +uidnumber: 621 +gidnumber: 621 +homeDirectory: /home/uid621 + +dn: cn=user622,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user622 +sn: user622 +uid: uid622 +givenname: givenname622 +description: description622 +userPassword: password622 +mail: uid622 +uidnumber: 622 +gidnumber: 622 +homeDirectory: /home/uid622 + +dn: cn=user623,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user623 +sn: user623 +uid: uid623 +givenname: givenname623 +description: description623 +userPassword: password623 +mail: uid623 +uidnumber: 623 +gidnumber: 623 +homeDirectory: /home/uid623 + +dn: cn=user624,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user624 +sn: user624 +uid: uid624 +givenname: givenname624 +description: description624 +userPassword: password624 +mail: uid624 +uidnumber: 624 +gidnumber: 624 +homeDirectory: /home/uid624 + +dn: cn=user625,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user625 +sn: user625 +uid: uid625 +givenname: givenname625 +description: description625 +userPassword: password625 +mail: uid625 +uidnumber: 625 +gidnumber: 625 +homeDirectory: /home/uid625 + +dn: cn=user626,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user626 +sn: user626 +uid: uid626 +givenname: givenname626 +description: description626 +userPassword: password626 +mail: 
uid626 +uidnumber: 626 +gidnumber: 626 +homeDirectory: /home/uid626 + +dn: cn=user627,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user627 +sn: user627 +uid: uid627 +givenname: givenname627 +description: description627 +userPassword: password627 +mail: uid627 +uidnumber: 627 +gidnumber: 627 +homeDirectory: /home/uid627 + +dn: cn=user628,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user628 +sn: user628 +uid: uid628 +givenname: givenname628 +description: description628 +userPassword: password628 +mail: uid628 +uidnumber: 628 +gidnumber: 628 +homeDirectory: /home/uid628 + +dn: cn=user629,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user629 +sn: user629 +uid: uid629 +givenname: givenname629 +description: description629 +userPassword: password629 +mail: uid629 +uidnumber: 629 +gidnumber: 629 +homeDirectory: /home/uid629 + +dn: cn=user630,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user630 +sn: user630 +uid: uid630 +givenname: givenname630 +description: description630 +userPassword: password630 +mail: uid630 +uidnumber: 630 +gidnumber: 630 +homeDirectory: /home/uid630 + +dn: cn=user631,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user631 +sn: user631 +uid: uid631 +givenname: givenname631 +description: description631 +userPassword: password631 +mail: uid631 +uidnumber: 631 +gidnumber: 631 +homeDirectory: /home/uid631 + +dn: cn=user632,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user632 +sn: user632 +uid: uid632 +givenname: givenname632 +description: description632 +userPassword: password632 +mail: uid632 +uidnumber: 632 +gidnumber: 632 +homeDirectory: /home/uid632 + +dn: cn=user633,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user633 +sn: user633 +uid: uid633 +givenname: givenname633 +description: description633 +userPassword: password633 +mail: uid633 +uidnumber: 633 +gidnumber: 633 +homeDirectory: /home/uid633 + +dn: cn=user634,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user634 +sn: user634 +uid: uid634 +givenname: givenname634 +description: description634 +userPassword: password634 +mail: uid634 +uidnumber: 634 +gidnumber: 634 +homeDirectory: /home/uid634 + +dn: cn=user635,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user635 +sn: user635 +uid: uid635 +givenname: givenname635 +description: description635 +userPassword: password635 +mail: uid635 +uidnumber: 635 +gidnumber: 635 +homeDirectory: /home/uid635 + +dn: cn=user636,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user636 +sn: user636 +uid: uid636 +givenname: givenname636 +description: description636 +userPassword: password636 +mail: uid636 +uidnumber: 636 +gidnumber: 636 +homeDirectory: /home/uid636 + +dn: cn=user637,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user637 +sn: user637 +uid: uid637 +givenname: givenname637 +description: description637 +userPassword: password637 +mail: uid637 +uidnumber: 637 +gidnumber: 637 +homeDirectory: /home/uid637 + +dn: cn=user638,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user638 +sn: user638 +uid: uid638 +givenname: givenname638 +description: description638 +userPassword: password638 +mail: uid638 +uidnumber: 638 +gidnumber: 638 +homeDirectory: /home/uid638 + +dn: cn=user639,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user639 +sn: user639 +uid: uid639 +givenname: givenname639 +description: description639 +userPassword: password639 +mail: uid639 +uidnumber: 639 +gidnumber: 639 +homeDirectory: /home/uid639 + +dn: cn=user640,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user640 +sn: user640 +uid: uid640 +givenname: givenname640 +description: description640 +userPassword: password640 +mail: uid640 +uidnumber: 640 +gidnumber: 640 +homeDirectory: /home/uid640 + +dn: cn=user641,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user641 +sn: user641 +uid: uid641 +givenname: givenname641 +description: description641 +userPassword: password641 +mail: uid641 +uidnumber: 641 +gidnumber: 641 +homeDirectory: /home/uid641 + +dn: cn=user642,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user642 +sn: user642 +uid: uid642 +givenname: givenname642 +description: description642 +userPassword: password642 +mail: uid642 +uidnumber: 642 +gidnumber: 642 +homeDirectory: /home/uid642 + +dn: cn=user643,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user643 +sn: user643 +uid: uid643 +givenname: givenname643 +description: description643 +userPassword: password643 +mail: uid643 +uidnumber: 643 +gidnumber: 643 +homeDirectory: /home/uid643 + +dn: cn=user644,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user644 +sn: user644 +uid: uid644 +givenname: givenname644 +description: description644 +userPassword: password644 +mail: uid644 +uidnumber: 644 +gidnumber: 644 +homeDirectory: /home/uid644 + +dn: cn=user645,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user645 +sn: user645 +uid: uid645 +givenname: givenname645 +description: description645 +userPassword: password645 +mail: uid645 +uidnumber: 645 +gidnumber: 645 +homeDirectory: /home/uid645 + 
+dn: cn=user646,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user646 +sn: user646 +uid: uid646 +givenname: givenname646 +description: description646 +userPassword: password646 +mail: uid646 +uidnumber: 646 +gidnumber: 646 +homeDirectory: /home/uid646 + +dn: cn=user647,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user647 +sn: user647 +uid: uid647 +givenname: givenname647 +description: description647 +userPassword: password647 +mail: uid647 +uidnumber: 647 +gidnumber: 647 +homeDirectory: /home/uid647 + +dn: cn=user648,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user648 +sn: user648 +uid: uid648 +givenname: givenname648 +description: description648 +userPassword: password648 +mail: uid648 +uidnumber: 648 +gidnumber: 648 +homeDirectory: /home/uid648 + +dn: cn=user649,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user649 +sn: user649 +uid: uid649 +givenname: givenname649 +description: description649 +userPassword: password649 +mail: uid649 +uidnumber: 649 +gidnumber: 649 +homeDirectory: /home/uid649 + +dn: cn=user650,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user650 +sn: user650 +uid: uid650 +givenname: givenname650 +description: description650 +userPassword: password650 +mail: uid650 +uidnumber: 650 +gidnumber: 650 +homeDirectory: /home/uid650 + +dn: cn=user651,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user651 +sn: user651 +uid: uid651 +givenname: givenname651 +description: description651 +userPassword: password651 +mail: uid651 +uidnumber: 651 +gidnumber: 651 +homeDirectory: /home/uid651 + +dn: cn=user652,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user652 +sn: user652 +uid: uid652 +givenname: givenname652 +description: description652 +userPassword: password652 +mail: uid652 +uidnumber: 652 +gidnumber: 652 +homeDirectory: /home/uid652 + +dn: cn=user653,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user653 +sn: user653 +uid: uid653 +givenname: givenname653 +description: description653 +userPassword: password653 +mail: uid653 +uidnumber: 653 +gidnumber: 653 +homeDirectory: /home/uid653 + +dn: cn=user654,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user654 +sn: user654 +uid: uid654 +givenname: givenname654 +description: description654 +userPassword: password654 +mail: uid654 +uidnumber: 654 +gidnumber: 654 +homeDirectory: /home/uid654 + +dn: cn=user655,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user655 +sn: user655 +uid: uid655 +givenname: 
givenname655 +description: description655 +userPassword: password655 +mail: uid655 +uidnumber: 655 +gidnumber: 655 +homeDirectory: /home/uid655 + +dn: cn=user656,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user656 +sn: user656 +uid: uid656 +givenname: givenname656 +description: description656 +userPassword: password656 +mail: uid656 +uidnumber: 656 +gidnumber: 656 +homeDirectory: /home/uid656 + +dn: cn=user657,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user657 +sn: user657 +uid: uid657 +givenname: givenname657 +description: description657 +userPassword: password657 +mail: uid657 +uidnumber: 657 +gidnumber: 657 +homeDirectory: /home/uid657 + +dn: cn=user658,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user658 +sn: user658 +uid: uid658 +givenname: givenname658 +description: description658 +userPassword: password658 +mail: uid658 +uidnumber: 658 +gidnumber: 658 +homeDirectory: /home/uid658 + +dn: cn=user659,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user659 +sn: user659 +uid: uid659 +givenname: givenname659 +description: description659 +userPassword: password659 +mail: uid659 +uidnumber: 659 +gidnumber: 659 +homeDirectory: /home/uid659 + +dn: cn=user660,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user660 +sn: user660 +uid: uid660 +givenname: givenname660 +description: description660 +userPassword: password660 +mail: uid660 +uidnumber: 660 +gidnumber: 660 +homeDirectory: /home/uid660 + +dn: cn=user661,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user661 +sn: user661 +uid: uid661 +givenname: givenname661 +description: description661 +userPassword: password661 +mail: uid661 +uidnumber: 661 +gidnumber: 661 +homeDirectory: /home/uid661 + +dn: cn=user662,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user662 +sn: user662 +uid: uid662 +givenname: givenname662 +description: description662 +userPassword: password662 +mail: uid662 +uidnumber: 662 +gidnumber: 662 +homeDirectory: /home/uid662 + +dn: cn=user663,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user663 +sn: user663 +uid: uid663 +givenname: givenname663 +description: description663 +userPassword: password663 +mail: uid663 +uidnumber: 663 +gidnumber: 663 +homeDirectory: /home/uid663 + +dn: cn=user664,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user664 +sn: user664 +uid: uid664 +givenname: givenname664 +description: description664 +userPassword: password664 +mail: uid664 +uidnumber: 664 +gidnumber: 664 +homeDirectory: /home/uid664 + +dn: cn=user665,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user665 +sn: user665 +uid: uid665 +givenname: givenname665 +description: description665 +userPassword: password665 +mail: uid665 +uidnumber: 665 +gidnumber: 665 +homeDirectory: /home/uid665 + +dn: cn=user666,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user666 +sn: user666 +uid: uid666 +givenname: givenname666 +description: description666 +userPassword: password666 +mail: uid666 +uidnumber: 666 +gidnumber: 666 +homeDirectory: /home/uid666 + +dn: cn=user667,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user667 +sn: user667 +uid: uid667 +givenname: givenname667 +description: description667 +userPassword: password667 +mail: uid667 +uidnumber: 667 +gidnumber: 667 +homeDirectory: /home/uid667 + +dn: cn=user668,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user668 +sn: user668 +uid: uid668 +givenname: givenname668 +description: description668 +userPassword: password668 +mail: uid668 +uidnumber: 668 +gidnumber: 668 +homeDirectory: /home/uid668 + +dn: cn=user669,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user669 +sn: user669 +uid: uid669 +givenname: givenname669 +description: description669 +userPassword: password669 +mail: uid669 +uidnumber: 669 +gidnumber: 669 +homeDirectory: /home/uid669 + +dn: cn=user670,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user670 +sn: user670 +uid: uid670 +givenname: givenname670 +description: description670 +userPassword: password670 +mail: uid670 +uidnumber: 670 +gidnumber: 670 +homeDirectory: /home/uid670 + +dn: cn=user671,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user671 +sn: user671 +uid: uid671 +givenname: givenname671 +description: description671 +userPassword: password671 +mail: uid671 +uidnumber: 671 +gidnumber: 671 +homeDirectory: /home/uid671 + +dn: cn=user672,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user672 +sn: user672 +uid: uid672 +givenname: givenname672 +description: description672 +userPassword: password672 +mail: uid672 +uidnumber: 672 +gidnumber: 672 +homeDirectory: /home/uid672 + +dn: cn=user673,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user673 +sn: user673 +uid: uid673 +givenname: givenname673 +description: description673 +userPassword: password673 +mail: uid673 +uidnumber: 673 +gidnumber: 673 +homeDirectory: /home/uid673 + +dn: cn=user674,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user674 +sn: user674 +uid: uid674 +givenname: givenname674 +description: description674 +userPassword: password674 +mail: 
uid674 +uidnumber: 674 +gidnumber: 674 +homeDirectory: /home/uid674 + +dn: cn=user675,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user675 +sn: user675 +uid: uid675 +givenname: givenname675 +description: description675 +userPassword: password675 +mail: uid675 +uidnumber: 675 +gidnumber: 675 +homeDirectory: /home/uid675 + +dn: cn=user676,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user676 +sn: user676 +uid: uid676 +givenname: givenname676 +description: description676 +userPassword: password676 +mail: uid676 +uidnumber: 676 +gidnumber: 676 +homeDirectory: /home/uid676 + +dn: cn=user677,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user677 +sn: user677 +uid: uid677 +givenname: givenname677 +description: description677 +userPassword: password677 +mail: uid677 +uidnumber: 677 +gidnumber: 677 +homeDirectory: /home/uid677 + +dn: cn=user678,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user678 +sn: user678 +uid: uid678 +givenname: givenname678 +description: description678 +userPassword: password678 +mail: uid678 +uidnumber: 678 +gidnumber: 678 +homeDirectory: /home/uid678 + +dn: cn=user679,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user679 +sn: user679 +uid: uid679 +givenname: givenname679 +description: description679 +userPassword: password679 +mail: uid679 +uidnumber: 679 +gidnumber: 679 +homeDirectory: /home/uid679 + +dn: cn=user680,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user680 +sn: user680 +uid: uid680 +givenname: givenname680 +description: description680 +userPassword: password680 +mail: uid680 +uidnumber: 680 +gidnumber: 680 +homeDirectory: /home/uid680 + +dn: cn=user681,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user681 +sn: user681 +uid: uid681 +givenname: givenname681 +description: description681 +userPassword: password681 +mail: uid681 +uidnumber: 681 +gidnumber: 681 +homeDirectory: /home/uid681 + +dn: cn=user682,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user682 +sn: user682 +uid: uid682 +givenname: givenname682 +description: description682 +userPassword: password682 +mail: uid682 +uidnumber: 682 +gidnumber: 682 +homeDirectory: /home/uid682 + +dn: cn=user683,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user683 +sn: user683 +uid: uid683 +givenname: givenname683 +description: description683 +userPassword: password683 +mail: uid683 +uidnumber: 683 +gidnumber: 683 +homeDirectory: /home/uid683 + +dn: cn=user684,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user684 +sn: user684 +uid: uid684 +givenname: givenname684 +description: description684 +userPassword: password684 +mail: uid684 +uidnumber: 684 +gidnumber: 684 +homeDirectory: /home/uid684 + +dn: cn=user685,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user685 +sn: user685 +uid: uid685 +givenname: givenname685 +description: description685 +userPassword: password685 +mail: uid685 +uidnumber: 685 +gidnumber: 685 +homeDirectory: /home/uid685 + +dn: cn=user686,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user686 +sn: user686 +uid: uid686 +givenname: givenname686 +description: description686 +userPassword: password686 +mail: uid686 +uidnumber: 686 +gidnumber: 686 +homeDirectory: /home/uid686 + +dn: cn=user687,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user687 +sn: user687 +uid: uid687 +givenname: givenname687 +description: description687 +userPassword: password687 +mail: uid687 +uidnumber: 687 +gidnumber: 687 +homeDirectory: /home/uid687 + +dn: cn=user688,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user688 +sn: user688 +uid: uid688 +givenname: givenname688 +description: description688 +userPassword: password688 +mail: uid688 +uidnumber: 688 +gidnumber: 688 +homeDirectory: /home/uid688 + +dn: cn=user689,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user689 +sn: user689 +uid: uid689 +givenname: givenname689 +description: description689 +userPassword: password689 +mail: uid689 +uidnumber: 689 +gidnumber: 689 +homeDirectory: /home/uid689 + +dn: cn=user690,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user690 +sn: user690 +uid: uid690 +givenname: givenname690 +description: description690 +userPassword: password690 +mail: uid690 +uidnumber: 690 +gidnumber: 690 +homeDirectory: /home/uid690 + +dn: cn=user691,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user691 +sn: user691 +uid: uid691 +givenname: givenname691 +description: description691 +userPassword: password691 +mail: uid691 +uidnumber: 691 +gidnumber: 691 +homeDirectory: /home/uid691 + +dn: cn=user692,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user692 +sn: user692 +uid: uid692 +givenname: givenname692 +description: description692 +userPassword: password692 +mail: uid692 +uidnumber: 692 +gidnumber: 692 +homeDirectory: /home/uid692 + +dn: cn=user693,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user693 +sn: user693 +uid: uid693 +givenname: givenname693 +description: description693 +userPassword: password693 +mail: uid693 +uidnumber: 693 +gidnumber: 693 +homeDirectory: /home/uid693 + 
+dn: cn=user694,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user694 +sn: user694 +uid: uid694 +givenname: givenname694 +description: description694 +userPassword: password694 +mail: uid694 +uidnumber: 694 +gidnumber: 694 +homeDirectory: /home/uid694 + +dn: cn=user695,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user695 +sn: user695 +uid: uid695 +givenname: givenname695 +description: description695 +userPassword: password695 +mail: uid695 +uidnumber: 695 +gidnumber: 695 +homeDirectory: /home/uid695 + +dn: cn=user696,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user696 +sn: user696 +uid: uid696 +givenname: givenname696 +description: description696 +userPassword: password696 +mail: uid696 +uidnumber: 696 +gidnumber: 696 +homeDirectory: /home/uid696 + +dn: cn=user697,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user697 +sn: user697 +uid: uid697 +givenname: givenname697 +description: description697 +userPassword: password697 +mail: uid697 +uidnumber: 697 +gidnumber: 697 +homeDirectory: /home/uid697 + +dn: cn=user698,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user698 +sn: user698 +uid: uid698 +givenname: givenname698 +description: description698 +userPassword: password698 +mail: uid698 +uidnumber: 698 +gidnumber: 698 +homeDirectory: /home/uid698 + +dn: cn=user699,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user699 +sn: user699 +uid: uid699 +givenname: givenname699 +description: description699 +userPassword: password699 +mail: uid699 +uidnumber: 699 +gidnumber: 699 +homeDirectory: /home/uid699 + +dn: cn=user700,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user700 +sn: user700 +uid: uid700 +givenname: givenname700 +description: description700 +userPassword: password700 +mail: uid700 +uidnumber: 700 +gidnumber: 700 +homeDirectory: /home/uid700 + +dn: cn=user701,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user701 +sn: user701 +uid: uid701 +givenname: givenname701 +description: description701 +userPassword: password701 +mail: uid701 +uidnumber: 701 +gidnumber: 701 +homeDirectory: /home/uid701 + +dn: cn=user702,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user702 +sn: user702 +uid: uid702 +givenname: givenname702 +description: description702 +userPassword: password702 +mail: uid702 +uidnumber: 702 +gidnumber: 702 +homeDirectory: /home/uid702 + +dn: cn=user703,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user703 +sn: user703 +uid: uid703 +givenname: 
givenname703 +description: description703 +userPassword: password703 +mail: uid703 +uidnumber: 703 +gidnumber: 703 +homeDirectory: /home/uid703 + +dn: cn=user704,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user704 +sn: user704 +uid: uid704 +givenname: givenname704 +description: description704 +userPassword: password704 +mail: uid704 +uidnumber: 704 +gidnumber: 704 +homeDirectory: /home/uid704 + +dn: cn=user705,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user705 +sn: user705 +uid: uid705 +givenname: givenname705 +description: description705 +userPassword: password705 +mail: uid705 +uidnumber: 705 +gidnumber: 705 +homeDirectory: /home/uid705 + +dn: cn=user706,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user706 +sn: user706 +uid: uid706 +givenname: givenname706 +description: description706 +userPassword: password706 +mail: uid706 +uidnumber: 706 +gidnumber: 706 +homeDirectory: /home/uid706 + +dn: cn=user707,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user707 +sn: user707 +uid: uid707 +givenname: givenname707 +description: description707 +userPassword: password707 +mail: uid707 +uidnumber: 707 +gidnumber: 707 +homeDirectory: /home/uid707 + +dn: cn=user708,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user708 +sn: user708 +uid: uid708 +givenname: givenname708 +description: description708 +userPassword: password708 +mail: uid708 +uidnumber: 708 +gidnumber: 708 +homeDirectory: /home/uid708 + +dn: cn=user709,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user709 +sn: user709 +uid: uid709 +givenname: givenname709 +description: description709 +userPassword: password709 +mail: uid709 +uidnumber: 709 +gidnumber: 709 +homeDirectory: /home/uid709 + +dn: cn=user710,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user710 +sn: user710 +uid: uid710 +givenname: givenname710 +description: description710 +userPassword: password710 +mail: uid710 +uidnumber: 710 +gidnumber: 710 +homeDirectory: /home/uid710 + +dn: cn=user711,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user711 +sn: user711 +uid: uid711 +givenname: givenname711 +description: description711 +userPassword: password711 +mail: uid711 +uidnumber: 711 +gidnumber: 711 +homeDirectory: /home/uid711 + +dn: cn=user712,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user712 +sn: user712 +uid: uid712 +givenname: givenname712 +description: description712 +userPassword: password712 +mail: uid712 +uidnumber: 712 +gidnumber: 712 +homeDirectory: /home/uid712 + +dn: cn=user713,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user713 +sn: user713 +uid: uid713 +givenname: givenname713 +description: description713 +userPassword: password713 +mail: uid713 +uidnumber: 713 +gidnumber: 713 +homeDirectory: /home/uid713 + +dn: cn=user714,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user714 +sn: user714 +uid: uid714 +givenname: givenname714 +description: description714 +userPassword: password714 +mail: uid714 +uidnumber: 714 +gidnumber: 714 +homeDirectory: /home/uid714 + +dn: cn=user715,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user715 +sn: user715 +uid: uid715 +givenname: givenname715 +description: description715 +userPassword: password715 +mail: uid715 +uidnumber: 715 +gidnumber: 715 +homeDirectory: /home/uid715 + +dn: cn=user716,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user716 +sn: user716 +uid: uid716 +givenname: givenname716 +description: description716 +userPassword: password716 +mail: uid716 +uidnumber: 716 +gidnumber: 716 +homeDirectory: /home/uid716 + +dn: cn=user717,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user717 +sn: user717 +uid: uid717 +givenname: givenname717 +description: description717 +userPassword: password717 +mail: uid717 +uidnumber: 717 +gidnumber: 717 +homeDirectory: /home/uid717 + +dn: cn=user718,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user718 +sn: user718 +uid: uid718 +givenname: givenname718 +description: description718 +userPassword: password718 +mail: uid718 +uidnumber: 718 +gidnumber: 718 +homeDirectory: /home/uid718 + +dn: cn=user719,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user719 +sn: user719 +uid: uid719 +givenname: givenname719 +description: description719 +userPassword: password719 +mail: uid719 +uidnumber: 719 +gidnumber: 719 +homeDirectory: /home/uid719 + +dn: cn=user720,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user720 +sn: user720 +uid: uid720 +givenname: givenname720 +description: description720 +userPassword: password720 +mail: uid720 +uidnumber: 720 +gidnumber: 720 +homeDirectory: /home/uid720 + +dn: cn=user721,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user721 +sn: user721 +uid: uid721 +givenname: givenname721 +description: description721 +userPassword: password721 +mail: uid721 +uidnumber: 721 +gidnumber: 721 +homeDirectory: /home/uid721 + +dn: cn=user722,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user722 +sn: user722 +uid: uid722 +givenname: givenname722 +description: description722 +userPassword: password722 +mail: 
uid722 +uidnumber: 722 +gidnumber: 722 +homeDirectory: /home/uid722 + +dn: cn=user723,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user723 +sn: user723 +uid: uid723 +givenname: givenname723 +description: description723 +userPassword: password723 +mail: uid723 +uidnumber: 723 +gidnumber: 723 +homeDirectory: /home/uid723 + +dn: cn=user724,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user724 +sn: user724 +uid: uid724 +givenname: givenname724 +description: description724 +userPassword: password724 +mail: uid724 +uidnumber: 724 +gidnumber: 724 +homeDirectory: /home/uid724 + +dn: cn=user725,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user725 +sn: user725 +uid: uid725 +givenname: givenname725 +description: description725 +userPassword: password725 +mail: uid725 +uidnumber: 725 +gidnumber: 725 +homeDirectory: /home/uid725 + +dn: cn=user726,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user726 +sn: user726 +uid: uid726 +givenname: givenname726 +description: description726 +userPassword: password726 +mail: uid726 +uidnumber: 726 +gidnumber: 726 +homeDirectory: /home/uid726 + +dn: cn=user727,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user727 +sn: user727 +uid: uid727 +givenname: givenname727 +description: description727 +userPassword: password727 +mail: uid727 +uidnumber: 727 +gidnumber: 727 +homeDirectory: /home/uid727 + +dn: cn=user728,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user728 +sn: user728 +uid: uid728 +givenname: givenname728 +description: description728 +userPassword: password728 +mail: uid728 +uidnumber: 728 +gidnumber: 728 +homeDirectory: /home/uid728 + +dn: cn=user729,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user729 +sn: user729 +uid: uid729 +givenname: givenname729 +description: description729 +userPassword: password729 +mail: uid729 +uidnumber: 729 +gidnumber: 729 +homeDirectory: /home/uid729 + +dn: cn=user730,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user730 +sn: user730 +uid: uid730 +givenname: givenname730 +description: description730 +userPassword: password730 +mail: uid730 +uidnumber: 730 +gidnumber: 730 +homeDirectory: /home/uid730 + +dn: cn=user731,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user731 +sn: user731 +uid: uid731 +givenname: givenname731 +description: description731 +userPassword: password731 +mail: uid731 +uidnumber: 731 +gidnumber: 731 +homeDirectory: /home/uid731 + +dn: cn=user732,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user732 +sn: user732 +uid: uid732 +givenname: givenname732 +description: description732 +userPassword: password732 +mail: uid732 +uidnumber: 732 +gidnumber: 732 +homeDirectory: /home/uid732 + +dn: cn=user733,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user733 +sn: user733 +uid: uid733 +givenname: givenname733 +description: description733 +userPassword: password733 +mail: uid733 +uidnumber: 733 +gidnumber: 733 +homeDirectory: /home/uid733 + +dn: cn=user734,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user734 +sn: user734 +uid: uid734 +givenname: givenname734 +description: description734 +userPassword: password734 +mail: uid734 +uidnumber: 734 +gidnumber: 734 +homeDirectory: /home/uid734 + +dn: cn=user735,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user735 +sn: user735 +uid: uid735 +givenname: givenname735 +description: description735 +userPassword: password735 +mail: uid735 +uidnumber: 735 +gidnumber: 735 +homeDirectory: /home/uid735 + +dn: cn=user736,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user736 +sn: user736 +uid: uid736 +givenname: givenname736 +description: description736 +userPassword: password736 +mail: uid736 +uidnumber: 736 +gidnumber: 736 +homeDirectory: /home/uid736 + +dn: cn=user737,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user737 +sn: user737 +uid: uid737 +givenname: givenname737 +description: description737 +userPassword: password737 +mail: uid737 +uidnumber: 737 +gidnumber: 737 +homeDirectory: /home/uid737 + +dn: cn=user738,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user738 +sn: user738 +uid: uid738 +givenname: givenname738 +description: description738 +userPassword: password738 +mail: uid738 +uidnumber: 738 +gidnumber: 738 +homeDirectory: /home/uid738 + +dn: cn=user739,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user739 +sn: user739 +uid: uid739 +givenname: givenname739 +description: description739 +userPassword: password739 +mail: uid739 +uidnumber: 739 +gidnumber: 739 +homeDirectory: /home/uid739 + +dn: cn=user740,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user740 +sn: user740 +uid: uid740 +givenname: givenname740 +description: description740 +userPassword: password740 +mail: uid740 +uidnumber: 740 +gidnumber: 740 +homeDirectory: /home/uid740 + +dn: cn=user741,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user741 +sn: user741 +uid: uid741 +givenname: givenname741 +description: description741 +userPassword: password741 +mail: uid741 +uidnumber: 741 +gidnumber: 741 +homeDirectory: /home/uid741 + 
+dn: cn=user742,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user742 +sn: user742 +uid: uid742 +givenname: givenname742 +description: description742 +userPassword: password742 +mail: uid742 +uidnumber: 742 +gidnumber: 742 +homeDirectory: /home/uid742 + +dn: cn=user743,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user743 +sn: user743 +uid: uid743 +givenname: givenname743 +description: description743 +userPassword: password743 +mail: uid743 +uidnumber: 743 +gidnumber: 743 +homeDirectory: /home/uid743 + +dn: cn=user744,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user744 +sn: user744 +uid: uid744 +givenname: givenname744 +description: description744 +userPassword: password744 +mail: uid744 +uidnumber: 744 +gidnumber: 744 +homeDirectory: /home/uid744 + +dn: cn=user745,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user745 +sn: user745 +uid: uid745 +givenname: givenname745 +description: description745 +userPassword: password745 +mail: uid745 +uidnumber: 745 +gidnumber: 745 +homeDirectory: /home/uid745 + +dn: cn=user746,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user746 +sn: user746 +uid: uid746 +givenname: givenname746 +description: description746 +userPassword: password746 +mail: uid746 +uidnumber: 746 +gidnumber: 746 +homeDirectory: /home/uid746 + +dn: cn=user747,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user747 +sn: user747 +uid: uid747 +givenname: givenname747 +description: description747 +userPassword: password747 +mail: uid747 +uidnumber: 747 +gidnumber: 747 +homeDirectory: /home/uid747 + +dn: cn=user748,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user748 +sn: user748 +uid: uid748 +givenname: givenname748 +description: description748 +userPassword: password748 +mail: uid748 +uidnumber: 748 +gidnumber: 748 +homeDirectory: /home/uid748 + +dn: cn=user749,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user749 +sn: user749 +uid: uid749 +givenname: givenname749 +description: description749 +userPassword: password749 +mail: uid749 +uidnumber: 749 +gidnumber: 749 +homeDirectory: /home/uid749 + +dn: cn=user750,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user750 +sn: user750 +uid: uid750 +givenname: givenname750 +description: description750 +userPassword: password750 +mail: uid750 +uidnumber: 750 +gidnumber: 750 +homeDirectory: /home/uid750 + +dn: cn=user751,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user751 +sn: user751 +uid: uid751 +givenname: 
givenname751 +description: description751 +userPassword: password751 +mail: uid751 +uidnumber: 751 +gidnumber: 751 +homeDirectory: /home/uid751 + +dn: cn=user752,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user752 +sn: user752 +uid: uid752 +givenname: givenname752 +description: description752 +userPassword: password752 +mail: uid752 +uidnumber: 752 +gidnumber: 752 +homeDirectory: /home/uid752 + +dn: cn=user753,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user753 +sn: user753 +uid: uid753 +givenname: givenname753 +description: description753 +userPassword: password753 +mail: uid753 +uidnumber: 753 +gidnumber: 753 +homeDirectory: /home/uid753 + +dn: cn=user754,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user754 +sn: user754 +uid: uid754 +givenname: givenname754 +description: description754 +userPassword: password754 +mail: uid754 +uidnumber: 754 +gidnumber: 754 +homeDirectory: /home/uid754 + +dn: cn=user755,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user755 +sn: user755 +uid: uid755 +givenname: givenname755 +description: description755 +userPassword: password755 +mail: uid755 +uidnumber: 755 +gidnumber: 755 +homeDirectory: /home/uid755 + +dn: cn=user756,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user756 +sn: user756 +uid: uid756 +givenname: givenname756 +description: description756 +userPassword: password756 +mail: uid756 +uidnumber: 756 +gidnumber: 756 +homeDirectory: /home/uid756 + +dn: cn=user757,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user757 +sn: user757 +uid: uid757 +givenname: givenname757 +description: description757 +userPassword: password757 +mail: uid757 +uidnumber: 757 +gidnumber: 757 +homeDirectory: /home/uid757 + +dn: cn=user758,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user758 +sn: user758 +uid: uid758 +givenname: givenname758 +description: description758 +userPassword: password758 +mail: uid758 +uidnumber: 758 +gidnumber: 758 +homeDirectory: /home/uid758 + +dn: cn=user759,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user759 +sn: user759 +uid: uid759 +givenname: givenname759 +description: description759 +userPassword: password759 +mail: uid759 +uidnumber: 759 +gidnumber: 759 +homeDirectory: /home/uid759 + +dn: cn=user760,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user760 +sn: user760 +uid: uid760 +givenname: givenname760 +description: description760 +userPassword: password760 +mail: uid760 +uidnumber: 760 +gidnumber: 760 +homeDirectory: /home/uid760 + +dn: cn=user761,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user761 +sn: user761 +uid: uid761 +givenname: givenname761 +description: description761 +userPassword: password761 +mail: uid761 +uidnumber: 761 +gidnumber: 761 +homeDirectory: /home/uid761 + +dn: cn=user762,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user762 +sn: user762 +uid: uid762 +givenname: givenname762 +description: description762 +userPassword: password762 +mail: uid762 +uidnumber: 762 +gidnumber: 762 +homeDirectory: /home/uid762 + +dn: cn=user763,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user763 +sn: user763 +uid: uid763 +givenname: givenname763 +description: description763 +userPassword: password763 +mail: uid763 +uidnumber: 763 +gidnumber: 763 +homeDirectory: /home/uid763 + +dn: cn=user764,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user764 +sn: user764 +uid: uid764 +givenname: givenname764 +description: description764 +userPassword: password764 +mail: uid764 +uidnumber: 764 +gidnumber: 764 +homeDirectory: /home/uid764 + +dn: cn=user765,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user765 +sn: user765 +uid: uid765 +givenname: givenname765 +description: description765 +userPassword: password765 +mail: uid765 +uidnumber: 765 +gidnumber: 765 +homeDirectory: /home/uid765 + +dn: cn=user766,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user766 +sn: user766 +uid: uid766 +givenname: givenname766 +description: description766 +userPassword: password766 +mail: uid766 +uidnumber: 766 +gidnumber: 766 +homeDirectory: /home/uid766 + +dn: cn=user767,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user767 +sn: user767 +uid: uid767 +givenname: givenname767 +description: description767 +userPassword: password767 +mail: uid767 +uidnumber: 767 +gidnumber: 767 +homeDirectory: /home/uid767 + +dn: cn=user768,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user768 +sn: user768 +uid: uid768 +givenname: givenname768 +description: description768 +userPassword: password768 +mail: uid768 +uidnumber: 768 +gidnumber: 768 +homeDirectory: /home/uid768 + +dn: cn=user769,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user769 +sn: user769 +uid: uid769 +givenname: givenname769 +description: description769 +userPassword: password769 +mail: uid769 +uidnumber: 769 +gidnumber: 769 +homeDirectory: /home/uid769 + +dn: cn=user770,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user770 +sn: user770 +uid: uid770 +givenname: givenname770 +description: description770 +userPassword: password770 +mail: 
uid770 +uidnumber: 770 +gidnumber: 770 +homeDirectory: /home/uid770 + +dn: cn=user771,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user771 +sn: user771 +uid: uid771 +givenname: givenname771 +description: description771 +userPassword: password771 +mail: uid771 +uidnumber: 771 +gidnumber: 771 +homeDirectory: /home/uid771 + +dn: cn=user772,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user772 +sn: user772 +uid: uid772 +givenname: givenname772 +description: description772 +userPassword: password772 +mail: uid772 +uidnumber: 772 +gidnumber: 772 +homeDirectory: /home/uid772 + +dn: cn=user773,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user773 +sn: user773 +uid: uid773 +givenname: givenname773 +description: description773 +userPassword: password773 +mail: uid773 +uidnumber: 773 +gidnumber: 773 +homeDirectory: /home/uid773 + +dn: cn=user774,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user774 +sn: user774 +uid: uid774 +givenname: givenname774 +description: description774 +userPassword: password774 +mail: uid774 +uidnumber: 774 +gidnumber: 774 +homeDirectory: /home/uid774 + +dn: cn=user775,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user775 +sn: user775 +uid: uid775 +givenname: givenname775 +description: description775 +userPassword: password775 +mail: uid775 +uidnumber: 775 +gidnumber: 775 +homeDirectory: /home/uid775 + +dn: cn=user776,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user776 +sn: user776 +uid: uid776 +givenname: givenname776 +description: description776 +userPassword: password776 +mail: uid776 +uidnumber: 776 +gidnumber: 776 +homeDirectory: /home/uid776 + +dn: cn=user777,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user777 +sn: user777 +uid: uid777 +givenname: givenname777 +description: description777 +userPassword: password777 +mail: uid777 +uidnumber: 777 +gidnumber: 777 +homeDirectory: /home/uid777 + +dn: cn=user778,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user778 +sn: user778 +uid: uid778 +givenname: givenname778 +description: description778 +userPassword: password778 +mail: uid778 +uidnumber: 778 +gidnumber: 778 +homeDirectory: /home/uid778 + +dn: cn=user779,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user779 +sn: user779 +uid: uid779 +givenname: givenname779 +description: description779 +userPassword: password779 +mail: uid779 +uidnumber: 779 +gidnumber: 779 +homeDirectory: /home/uid779 + +dn: cn=user780,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user780 +sn: user780 +uid: uid780 +givenname: givenname780 +description: description780 +userPassword: password780 +mail: uid780 +uidnumber: 780 +gidnumber: 780 +homeDirectory: /home/uid780 + +dn: cn=user781,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user781 +sn: user781 +uid: uid781 +givenname: givenname781 +description: description781 +userPassword: password781 +mail: uid781 +uidnumber: 781 +gidnumber: 781 +homeDirectory: /home/uid781 + +dn: cn=user782,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user782 +sn: user782 +uid: uid782 +givenname: givenname782 +description: description782 +userPassword: password782 +mail: uid782 +uidnumber: 782 +gidnumber: 782 +homeDirectory: /home/uid782 + +dn: cn=user783,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user783 +sn: user783 +uid: uid783 +givenname: givenname783 +description: description783 +userPassword: password783 +mail: uid783 +uidnumber: 783 +gidnumber: 783 +homeDirectory: /home/uid783 + +dn: cn=user784,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user784 +sn: user784 +uid: uid784 +givenname: givenname784 +description: description784 +userPassword: password784 +mail: uid784 +uidnumber: 784 +gidnumber: 784 +homeDirectory: /home/uid784 + +dn: cn=user785,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user785 +sn: user785 +uid: uid785 +givenname: givenname785 +description: description785 +userPassword: password785 +mail: uid785 +uidnumber: 785 +gidnumber: 785 +homeDirectory: /home/uid785 + +dn: cn=user786,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user786 +sn: user786 +uid: uid786 +givenname: givenname786 +description: description786 +userPassword: password786 +mail: uid786 +uidnumber: 786 +gidnumber: 786 +homeDirectory: /home/uid786 + +dn: cn=user787,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user787 +sn: user787 +uid: uid787 +givenname: givenname787 +description: description787 +userPassword: password787 +mail: uid787 +uidnumber: 787 +gidnumber: 787 +homeDirectory: /home/uid787 + +dn: cn=user788,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user788 +sn: user788 +uid: uid788 +givenname: givenname788 +description: description788 +userPassword: password788 +mail: uid788 +uidnumber: 788 +gidnumber: 788 +homeDirectory: /home/uid788 + +dn: cn=user789,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user789 +sn: user789 +uid: uid789 +givenname: givenname789 +description: description789 +userPassword: password789 +mail: uid789 +uidnumber: 789 +gidnumber: 789 +homeDirectory: /home/uid789 + 
+dn: cn=user790,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user790 +sn: user790 +uid: uid790 +givenname: givenname790 +description: description790 +userPassword: password790 +mail: uid790 +uidnumber: 790 +gidnumber: 790 +homeDirectory: /home/uid790 + +dn: cn=user791,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user791 +sn: user791 +uid: uid791 +givenname: givenname791 +description: description791 +userPassword: password791 +mail: uid791 +uidnumber: 791 +gidnumber: 791 +homeDirectory: /home/uid791 + +dn: cn=user792,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user792 +sn: user792 +uid: uid792 +givenname: givenname792 +description: description792 +userPassword: password792 +mail: uid792 +uidnumber: 792 +gidnumber: 792 +homeDirectory: /home/uid792 + +dn: cn=user793,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user793 +sn: user793 +uid: uid793 +givenname: givenname793 +description: description793 +userPassword: password793 +mail: uid793 +uidnumber: 793 +gidnumber: 793 +homeDirectory: /home/uid793 + +dn: cn=user794,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user794 +sn: user794 +uid: uid794 +givenname: givenname794 +description: description794 +userPassword: password794 +mail: uid794 +uidnumber: 794 +gidnumber: 794 +homeDirectory: /home/uid794 + +dn: cn=user795,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user795 +sn: user795 +uid: uid795 +givenname: givenname795 +description: description795 +userPassword: password795 +mail: uid795 +uidnumber: 795 +gidnumber: 795 +homeDirectory: /home/uid795 + +dn: cn=user796,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user796 +sn: user796 +uid: uid796 +givenname: givenname796 +description: description796 +userPassword: password796 +mail: uid796 +uidnumber: 796 +gidnumber: 796 +homeDirectory: /home/uid796 + +dn: cn=user797,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user797 +sn: user797 +uid: uid797 +givenname: givenname797 +description: description797 +userPassword: password797 +mail: uid797 +uidnumber: 797 +gidnumber: 797 +homeDirectory: /home/uid797 + +dn: cn=user798,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user798 +sn: user798 +uid: uid798 +givenname: givenname798 +description: description798 +userPassword: password798 +mail: uid798 +uidnumber: 798 +gidnumber: 798 +homeDirectory: /home/uid798 + +dn: cn=user799,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user799 +sn: user799 +uid: uid799 +givenname: 
givenname799 +description: description799 +userPassword: password799 +mail: uid799 +uidnumber: 799 +gidnumber: 799 +homeDirectory: /home/uid799 + +dn: cn=user800,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user800 +sn: user800 +uid: uid800 +givenname: givenname800 +description: description800 +userPassword: password800 +mail: uid800 +uidnumber: 800 +gidnumber: 800 +homeDirectory: /home/uid800 + +dn: cn=user801,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user801 +sn: user801 +uid: uid801 +givenname: givenname801 +description: description801 +userPassword: password801 +mail: uid801 +uidnumber: 801 +gidnumber: 801 +homeDirectory: /home/uid801 + +dn: cn=user802,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user802 +sn: user802 +uid: uid802 +givenname: givenname802 +description: description802 +userPassword: password802 +mail: uid802 +uidnumber: 802 +gidnumber: 802 +homeDirectory: /home/uid802 + +dn: cn=user803,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user803 +sn: user803 +uid: uid803 +givenname: givenname803 +description: description803 +userPassword: password803 +mail: uid803 +uidnumber: 803 +gidnumber: 803 +homeDirectory: /home/uid803 + +dn: cn=user804,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user804 +sn: user804 +uid: uid804 +givenname: givenname804 +description: description804 +userPassword: password804 +mail: uid804 +uidnumber: 804 +gidnumber: 804 +homeDirectory: /home/uid804 + +dn: cn=user805,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user805 +sn: user805 +uid: uid805 +givenname: givenname805 +description: description805 +userPassword: password805 +mail: uid805 +uidnumber: 805 +gidnumber: 805 +homeDirectory: /home/uid805 + +dn: cn=user806,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user806 +sn: user806 +uid: uid806 +givenname: givenname806 +description: description806 +userPassword: password806 +mail: uid806 +uidnumber: 806 +gidnumber: 806 +homeDirectory: /home/uid806 + +dn: cn=user807,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user807 +sn: user807 +uid: uid807 +givenname: givenname807 +description: description807 +userPassword: password807 +mail: uid807 +uidnumber: 807 +gidnumber: 807 +homeDirectory: /home/uid807 + +dn: cn=user808,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user808 +sn: user808 +uid: uid808 +givenname: givenname808 +description: description808 +userPassword: password808 +mail: uid808 +uidnumber: 808 +gidnumber: 808 +homeDirectory: /home/uid808 + +dn: cn=user809,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user809 +sn: user809 +uid: uid809 +givenname: givenname809 +description: description809 +userPassword: password809 +mail: uid809 +uidnumber: 809 +gidnumber: 809 +homeDirectory: /home/uid809 + +dn: cn=user810,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user810 +sn: user810 +uid: uid810 +givenname: givenname810 +description: description810 +userPassword: password810 +mail: uid810 +uidnumber: 810 +gidnumber: 810 +homeDirectory: /home/uid810 + +dn: cn=user811,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user811 +sn: user811 +uid: uid811 +givenname: givenname811 +description: description811 +userPassword: password811 +mail: uid811 +uidnumber: 811 +gidnumber: 811 +homeDirectory: /home/uid811 + +dn: cn=user812,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user812 +sn: user812 +uid: uid812 +givenname: givenname812 +description: description812 +userPassword: password812 +mail: uid812 +uidnumber: 812 +gidnumber: 812 +homeDirectory: /home/uid812 + +dn: cn=user813,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user813 +sn: user813 +uid: uid813 +givenname: givenname813 +description: description813 +userPassword: password813 +mail: uid813 +uidnumber: 813 +gidnumber: 813 +homeDirectory: /home/uid813 + +dn: cn=user814,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user814 +sn: user814 +uid: uid814 +givenname: givenname814 +description: description814 +userPassword: password814 +mail: uid814 +uidnumber: 814 +gidnumber: 814 +homeDirectory: /home/uid814 + +dn: cn=user815,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user815 +sn: user815 +uid: uid815 +givenname: givenname815 +description: description815 +userPassword: password815 +mail: uid815 +uidnumber: 815 +gidnumber: 815 +homeDirectory: /home/uid815 + +dn: cn=user816,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user816 +sn: user816 +uid: uid816 +givenname: givenname816 +description: description816 +userPassword: password816 +mail: uid816 +uidnumber: 816 +gidnumber: 816 +homeDirectory: /home/uid816 + +dn: cn=user817,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user817 +sn: user817 +uid: uid817 +givenname: givenname817 +description: description817 +userPassword: password817 +mail: uid817 +uidnumber: 817 +gidnumber: 817 +homeDirectory: /home/uid817 + +dn: cn=user818,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user818 +sn: user818 +uid: uid818 +givenname: givenname818 +description: description818 +userPassword: password818 +mail: 
uid818 +uidnumber: 818 +gidnumber: 818 +homeDirectory: /home/uid818 + +dn: cn=user819,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user819 +sn: user819 +uid: uid819 +givenname: givenname819 +description: description819 +userPassword: password819 +mail: uid819 +uidnumber: 819 +gidnumber: 819 +homeDirectory: /home/uid819 + +dn: cn=user820,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user820 +sn: user820 +uid: uid820 +givenname: givenname820 +description: description820 +userPassword: password820 +mail: uid820 +uidnumber: 820 +gidnumber: 820 +homeDirectory: /home/uid820 + +dn: cn=user821,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user821 +sn: user821 +uid: uid821 +givenname: givenname821 +description: description821 +userPassword: password821 +mail: uid821 +uidnumber: 821 +gidnumber: 821 +homeDirectory: /home/uid821 + +dn: cn=user822,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user822 +sn: user822 +uid: uid822 +givenname: givenname822 +description: description822 +userPassword: password822 +mail: uid822 +uidnumber: 822 +gidnumber: 822 +homeDirectory: /home/uid822 + +dn: cn=user823,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user823 +sn: user823 +uid: uid823 +givenname: givenname823 +description: description823 +userPassword: password823 +mail: uid823 +uidnumber: 823 +gidnumber: 823 +homeDirectory: /home/uid823 + +dn: cn=user824,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user824 +sn: user824 +uid: uid824 +givenname: givenname824 +description: description824 +userPassword: password824 +mail: uid824 +uidnumber: 824 +gidnumber: 824 +homeDirectory: /home/uid824 + +dn: cn=user825,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user825 +sn: user825 +uid: uid825 +givenname: givenname825 +description: description825 +userPassword: password825 +mail: uid825 +uidnumber: 825 +gidnumber: 825 +homeDirectory: /home/uid825 + +dn: cn=user826,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user826 +sn: user826 +uid: uid826 +givenname: givenname826 +description: description826 +userPassword: password826 +mail: uid826 +uidnumber: 826 +gidnumber: 826 +homeDirectory: /home/uid826 + +dn: cn=user827,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user827 +sn: user827 +uid: uid827 +givenname: givenname827 +description: description827 +userPassword: password827 +mail: uid827 +uidnumber: 827 +gidnumber: 827 +homeDirectory: /home/uid827 + +dn: cn=user828,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user828 +sn: user828 +uid: uid828 +givenname: givenname828 +description: description828 +userPassword: password828 +mail: uid828 +uidnumber: 828 +gidnumber: 828 +homeDirectory: /home/uid828 + +dn: cn=user829,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user829 +sn: user829 +uid: uid829 +givenname: givenname829 +description: description829 +userPassword: password829 +mail: uid829 +uidnumber: 829 +gidnumber: 829 +homeDirectory: /home/uid829 + +dn: cn=user830,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user830 +sn: user830 +uid: uid830 +givenname: givenname830 +description: description830 +userPassword: password830 +mail: uid830 +uidnumber: 830 +gidnumber: 830 +homeDirectory: /home/uid830 + +dn: cn=user831,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user831 +sn: user831 +uid: uid831 +givenname: givenname831 +description: description831 +userPassword: password831 +mail: uid831 +uidnumber: 831 +gidnumber: 831 +homeDirectory: /home/uid831 + +dn: cn=user832,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user832 +sn: user832 +uid: uid832 +givenname: givenname832 +description: description832 +userPassword: password832 +mail: uid832 +uidnumber: 832 +gidnumber: 832 +homeDirectory: /home/uid832 + +dn: cn=user833,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user833 +sn: user833 +uid: uid833 +givenname: givenname833 +description: description833 +userPassword: password833 +mail: uid833 +uidnumber: 833 +gidnumber: 833 +homeDirectory: /home/uid833 + +dn: cn=user834,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user834 +sn: user834 +uid: uid834 +givenname: givenname834 +description: description834 +userPassword: password834 +mail: uid834 +uidnumber: 834 +gidnumber: 834 +homeDirectory: /home/uid834 + +dn: cn=user835,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user835 +sn: user835 +uid: uid835 +givenname: givenname835 +description: description835 +userPassword: password835 +mail: uid835 +uidnumber: 835 +gidnumber: 835 +homeDirectory: /home/uid835 + +dn: cn=user836,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user836 +sn: user836 +uid: uid836 +givenname: givenname836 +description: description836 +userPassword: password836 +mail: uid836 +uidnumber: 836 +gidnumber: 836 +homeDirectory: /home/uid836 + +dn: cn=user837,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user837 +sn: user837 +uid: uid837 +givenname: givenname837 +description: description837 +userPassword: password837 +mail: uid837 +uidnumber: 837 +gidnumber: 837 +homeDirectory: /home/uid837 + 
+dn: cn=user838,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user838 +sn: user838 +uid: uid838 +givenname: givenname838 +description: description838 +userPassword: password838 +mail: uid838 +uidnumber: 838 +gidnumber: 838 +homeDirectory: /home/uid838 + +dn: cn=user839,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user839 +sn: user839 +uid: uid839 +givenname: givenname839 +description: description839 +userPassword: password839 +mail: uid839 +uidnumber: 839 +gidnumber: 839 +homeDirectory: /home/uid839 + +dn: cn=user840,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user840 +sn: user840 +uid: uid840 +givenname: givenname840 +description: description840 +userPassword: password840 +mail: uid840 +uidnumber: 840 +gidnumber: 840 +homeDirectory: /home/uid840 + +dn: cn=user841,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user841 +sn: user841 +uid: uid841 +givenname: givenname841 +description: description841 +userPassword: password841 +mail: uid841 +uidnumber: 841 +gidnumber: 841 +homeDirectory: /home/uid841 + +dn: cn=user842,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user842 +sn: user842 +uid: uid842 +givenname: givenname842 +description: description842 +userPassword: password842 +mail: uid842 +uidnumber: 842 +gidnumber: 842 +homeDirectory: /home/uid842 + +dn: cn=user843,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user843 +sn: user843 +uid: uid843 +givenname: givenname843 +description: description843 +userPassword: password843 +mail: uid843 +uidnumber: 843 +gidnumber: 843 +homeDirectory: /home/uid843 + +dn: cn=user844,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user844 +sn: user844 +uid: uid844 +givenname: givenname844 +description: description844 +userPassword: password844 +mail: uid844 +uidnumber: 844 +gidnumber: 844 +homeDirectory: /home/uid844 + +dn: cn=user845,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user845 +sn: user845 +uid: uid845 +givenname: givenname845 +description: description845 +userPassword: password845 +mail: uid845 +uidnumber: 845 +gidnumber: 845 +homeDirectory: /home/uid845 + +dn: cn=user846,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user846 +sn: user846 +uid: uid846 +givenname: givenname846 +description: description846 +userPassword: password846 +mail: uid846 +uidnumber: 846 +gidnumber: 846 +homeDirectory: /home/uid846 + +dn: cn=user847,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user847 +sn: user847 +uid: uid847 +givenname: 
givenname847 +description: description847 +userPassword: password847 +mail: uid847 +uidnumber: 847 +gidnumber: 847 +homeDirectory: /home/uid847 + +dn: cn=user848,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user848 +sn: user848 +uid: uid848 +givenname: givenname848 +description: description848 +userPassword: password848 +mail: uid848 +uidnumber: 848 +gidnumber: 848 +homeDirectory: /home/uid848 + +dn: cn=user849,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user849 +sn: user849 +uid: uid849 +givenname: givenname849 +description: description849 +userPassword: password849 +mail: uid849 +uidnumber: 849 +gidnumber: 849 +homeDirectory: /home/uid849 + +dn: cn=user850,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user850 +sn: user850 +uid: uid850 +givenname: givenname850 +description: description850 +userPassword: password850 +mail: uid850 +uidnumber: 850 +gidnumber: 850 +homeDirectory: /home/uid850 + +dn: cn=user851,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user851 +sn: user851 +uid: uid851 +givenname: givenname851 +description: description851 +userPassword: password851 +mail: uid851 +uidnumber: 851 +gidnumber: 851 +homeDirectory: /home/uid851 + +dn: cn=user852,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user852 +sn: user852 +uid: uid852 +givenname: givenname852 +description: description852 +userPassword: password852 +mail: uid852 +uidnumber: 852 +gidnumber: 852 +homeDirectory: /home/uid852 + +dn: cn=user853,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user853 +sn: user853 +uid: uid853 +givenname: givenname853 +description: description853 +userPassword: password853 +mail: uid853 +uidnumber: 853 +gidnumber: 853 +homeDirectory: /home/uid853 + +dn: cn=user854,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user854 +sn: user854 +uid: uid854 +givenname: givenname854 +description: description854 +userPassword: password854 +mail: uid854 +uidnumber: 854 +gidnumber: 854 +homeDirectory: /home/uid854 + +dn: cn=user855,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user855 +sn: user855 +uid: uid855 +givenname: givenname855 +description: description855 +userPassword: password855 +mail: uid855 +uidnumber: 855 +gidnumber: 855 +homeDirectory: /home/uid855 + +dn: cn=user856,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user856 +sn: user856 +uid: uid856 +givenname: givenname856 +description: description856 +userPassword: password856 +mail: uid856 +uidnumber: 856 +gidnumber: 856 +homeDirectory: /home/uid856 + +dn: cn=user857,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user857 +sn: user857 +uid: uid857 +givenname: givenname857 +description: description857 +userPassword: password857 +mail: uid857 +uidnumber: 857 +gidnumber: 857 +homeDirectory: /home/uid857 + +dn: cn=user858,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user858 +sn: user858 +uid: uid858 +givenname: givenname858 +description: description858 +userPassword: password858 +mail: uid858 +uidnumber: 858 +gidnumber: 858 +homeDirectory: /home/uid858 + +dn: cn=user859,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user859 +sn: user859 +uid: uid859 +givenname: givenname859 +description: description859 +userPassword: password859 +mail: uid859 +uidnumber: 859 +gidnumber: 859 +homeDirectory: /home/uid859 + +dn: cn=user860,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user860 +sn: user860 +uid: uid860 +givenname: givenname860 +description: description860 +userPassword: password860 +mail: uid860 +uidnumber: 860 +gidnumber: 860 +homeDirectory: /home/uid860 + +dn: cn=user861,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user861 +sn: user861 +uid: uid861 +givenname: givenname861 +description: description861 +userPassword: password861 +mail: uid861 +uidnumber: 861 +gidnumber: 861 +homeDirectory: /home/uid861 + +dn: cn=user862,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user862 +sn: user862 +uid: uid862 +givenname: givenname862 +description: description862 +userPassword: password862 +mail: uid862 +uidnumber: 862 +gidnumber: 862 +homeDirectory: /home/uid862 + +dn: cn=user863,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user863 +sn: user863 +uid: uid863 +givenname: givenname863 +description: description863 +userPassword: password863 +mail: uid863 +uidnumber: 863 +gidnumber: 863 +homeDirectory: /home/uid863 + +dn: cn=user864,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user864 +sn: user864 +uid: uid864 +givenname: givenname864 +description: description864 +userPassword: password864 +mail: uid864 +uidnumber: 864 +gidnumber: 864 +homeDirectory: /home/uid864 + +dn: cn=user865,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user865 +sn: user865 +uid: uid865 +givenname: givenname865 +description: description865 +userPassword: password865 +mail: uid865 +uidnumber: 865 +gidnumber: 865 +homeDirectory: /home/uid865 + +dn: cn=user866,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user866 +sn: user866 +uid: uid866 +givenname: givenname866 +description: description866 +userPassword: password866 +mail: 
uid866 +uidnumber: 866 +gidnumber: 866 +homeDirectory: /home/uid866 + +dn: cn=user867,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user867 +sn: user867 +uid: uid867 +givenname: givenname867 +description: description867 +userPassword: password867 +mail: uid867 +uidnumber: 867 +gidnumber: 867 +homeDirectory: /home/uid867 + +dn: cn=user868,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user868 +sn: user868 +uid: uid868 +givenname: givenname868 +description: description868 +userPassword: password868 +mail: uid868 +uidnumber: 868 +gidnumber: 868 +homeDirectory: /home/uid868 + +dn: cn=user869,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user869 +sn: user869 +uid: uid869 +givenname: givenname869 +description: description869 +userPassword: password869 +mail: uid869 +uidnumber: 869 +gidnumber: 869 +homeDirectory: /home/uid869 + +dn: cn=user870,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user870 +sn: user870 +uid: uid870 +givenname: givenname870 +description: description870 +userPassword: password870 +mail: uid870 +uidnumber: 870 +gidnumber: 870 +homeDirectory: /home/uid870 + +dn: cn=user871,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user871 +sn: user871 +uid: uid871 +givenname: givenname871 +description: description871 +userPassword: password871 +mail: uid871 +uidnumber: 871 +gidnumber: 871 +homeDirectory: /home/uid871 + +dn: cn=user872,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user872 +sn: user872 +uid: uid872 +givenname: givenname872 +description: description872 +userPassword: password872 +mail: uid872 +uidnumber: 872 +gidnumber: 872 +homeDirectory: /home/uid872 + +dn: cn=user873,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user873 +sn: user873 +uid: uid873 +givenname: givenname873 +description: description873 +userPassword: password873 +mail: uid873 +uidnumber: 873 +gidnumber: 873 +homeDirectory: /home/uid873 + +dn: cn=user874,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user874 +sn: user874 +uid: uid874 +givenname: givenname874 +description: description874 +userPassword: password874 +mail: uid874 +uidnumber: 874 +gidnumber: 874 +homeDirectory: /home/uid874 + +dn: cn=user875,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user875 +sn: user875 +uid: uid875 +givenname: givenname875 +description: description875 +userPassword: password875 +mail: uid875 +uidnumber: 875 +gidnumber: 875 +homeDirectory: /home/uid875 + +dn: cn=user876,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user876 +sn: user876 +uid: uid876 +givenname: givenname876 +description: description876 +userPassword: password876 +mail: uid876 +uidnumber: 876 +gidnumber: 876 +homeDirectory: /home/uid876 + +dn: cn=user877,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user877 +sn: user877 +uid: uid877 +givenname: givenname877 +description: description877 +userPassword: password877 +mail: uid877 +uidnumber: 877 +gidnumber: 877 +homeDirectory: /home/uid877 + +dn: cn=user878,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user878 +sn: user878 +uid: uid878 +givenname: givenname878 +description: description878 +userPassword: password878 +mail: uid878 +uidnumber: 878 +gidnumber: 878 +homeDirectory: /home/uid878 + +dn: cn=user879,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user879 +sn: user879 +uid: uid879 +givenname: givenname879 +description: description879 +userPassword: password879 +mail: uid879 +uidnumber: 879 +gidnumber: 879 +homeDirectory: /home/uid879 + +dn: cn=user880,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user880 +sn: user880 +uid: uid880 +givenname: givenname880 +description: description880 +userPassword: password880 +mail: uid880 +uidnumber: 880 +gidnumber: 880 +homeDirectory: /home/uid880 + +dn: cn=user881,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user881 +sn: user881 +uid: uid881 +givenname: givenname881 +description: description881 +userPassword: password881 +mail: uid881 +uidnumber: 881 +gidnumber: 881 +homeDirectory: /home/uid881 + +dn: cn=user882,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user882 +sn: user882 +uid: uid882 +givenname: givenname882 +description: description882 +userPassword: password882 +mail: uid882 +uidnumber: 882 +gidnumber: 882 +homeDirectory: /home/uid882 + +dn: cn=user883,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user883 +sn: user883 +uid: uid883 +givenname: givenname883 +description: description883 +userPassword: password883 +mail: uid883 +uidnumber: 883 +gidnumber: 883 +homeDirectory: /home/uid883 + +dn: cn=user884,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user884 +sn: user884 +uid: uid884 +givenname: givenname884 +description: description884 +userPassword: password884 +mail: uid884 +uidnumber: 884 +gidnumber: 884 +homeDirectory: /home/uid884 + +dn: cn=user885,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user885 +sn: user885 +uid: uid885 +givenname: givenname885 +description: description885 +userPassword: password885 +mail: uid885 +uidnumber: 885 +gidnumber: 885 +homeDirectory: /home/uid885 + 
+dn: cn=user886,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user886 +sn: user886 +uid: uid886 +givenname: givenname886 +description: description886 +userPassword: password886 +mail: uid886 +uidnumber: 886 +gidnumber: 886 +homeDirectory: /home/uid886 + +dn: cn=user887,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user887 +sn: user887 +uid: uid887 +givenname: givenname887 +description: description887 +userPassword: password887 +mail: uid887 +uidnumber: 887 +gidnumber: 887 +homeDirectory: /home/uid887 + +dn: cn=user888,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user888 +sn: user888 +uid: uid888 +givenname: givenname888 +description: description888 +userPassword: password888 +mail: uid888 +uidnumber: 888 +gidnumber: 888 +homeDirectory: /home/uid888 + +dn: cn=user889,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user889 +sn: user889 +uid: uid889 +givenname: givenname889 +description: description889 +userPassword: password889 +mail: uid889 +uidnumber: 889 +gidnumber: 889 +homeDirectory: /home/uid889 + +dn: cn=user890,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user890 +sn: user890 +uid: uid890 +givenname: givenname890 +description: description890 +userPassword: password890 +mail: uid890 +uidnumber: 890 +gidnumber: 890 +homeDirectory: /home/uid890 + +dn: cn=user891,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user891 +sn: user891 +uid: uid891 +givenname: givenname891 +description: description891 +userPassword: password891 +mail: uid891 +uidnumber: 891 +gidnumber: 891 +homeDirectory: /home/uid891 + +dn: cn=user892,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user892 +sn: user892 +uid: uid892 +givenname: givenname892 +description: description892 +userPassword: password892 +mail: uid892 +uidnumber: 892 +gidnumber: 892 +homeDirectory: /home/uid892 + +dn: cn=user893,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user893 +sn: user893 +uid: uid893 +givenname: givenname893 +description: description893 +userPassword: password893 +mail: uid893 +uidnumber: 893 +gidnumber: 893 +homeDirectory: /home/uid893 + +dn: cn=user894,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user894 +sn: user894 +uid: uid894 +givenname: givenname894 +description: description894 +userPassword: password894 +mail: uid894 +uidnumber: 894 +gidnumber: 894 +homeDirectory: /home/uid894 + +dn: cn=user895,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user895 +sn: user895 +uid: uid895 +givenname: 
givenname895 +description: description895 +userPassword: password895 +mail: uid895 +uidnumber: 895 +gidnumber: 895 +homeDirectory: /home/uid895 + +dn: cn=user896,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user896 +sn: user896 +uid: uid896 +givenname: givenname896 +description: description896 +userPassword: password896 +mail: uid896 +uidnumber: 896 +gidnumber: 896 +homeDirectory: /home/uid896 + +dn: cn=user897,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user897 +sn: user897 +uid: uid897 +givenname: givenname897 +description: description897 +userPassword: password897 +mail: uid897 +uidnumber: 897 +gidnumber: 897 +homeDirectory: /home/uid897 + +dn: cn=user898,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user898 +sn: user898 +uid: uid898 +givenname: givenname898 +description: description898 +userPassword: password898 +mail: uid898 +uidnumber: 898 +gidnumber: 898 +homeDirectory: /home/uid898 + +dn: cn=user899,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user899 +sn: user899 +uid: uid899 +givenname: givenname899 +description: description899 +userPassword: password899 +mail: uid899 +uidnumber: 899 +gidnumber: 899 +homeDirectory: /home/uid899 + +dn: cn=user900,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user900 +sn: user900 +uid: uid900 +givenname: givenname900 +description: description900 +userPassword: password900 +mail: uid900 +uidnumber: 900 +gidnumber: 900 +homeDirectory: /home/uid900 + +dn: cn=user901,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user901 +sn: user901 +uid: uid901 +givenname: givenname901 +description: description901 +userPassword: password901 +mail: uid901 +uidnumber: 901 +gidnumber: 901 +homeDirectory: /home/uid901 + +dn: cn=user902,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user902 +sn: user902 +uid: uid902 +givenname: givenname902 +description: description902 +userPassword: password902 +mail: uid902 +uidnumber: 902 +gidnumber: 902 +homeDirectory: /home/uid902 + +dn: cn=user903,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user903 +sn: user903 +uid: uid903 +givenname: givenname903 +description: description903 +userPassword: password903 +mail: uid903 +uidnumber: 903 +gidnumber: 903 +homeDirectory: /home/uid903 + +dn: cn=user904,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user904 +sn: user904 +uid: uid904 +givenname: givenname904 +description: description904 +userPassword: password904 +mail: uid904 +uidnumber: 904 +gidnumber: 904 +homeDirectory: /home/uid904 + +dn: cn=user905,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user905 +sn: user905 +uid: uid905 +givenname: givenname905 +description: description905 +userPassword: password905 +mail: uid905 +uidnumber: 905 +gidnumber: 905 +homeDirectory: /home/uid905 + +dn: cn=user906,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user906 +sn: user906 +uid: uid906 +givenname: givenname906 +description: description906 +userPassword: password906 +mail: uid906 +uidnumber: 906 +gidnumber: 906 +homeDirectory: /home/uid906 + +dn: cn=user907,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user907 +sn: user907 +uid: uid907 +givenname: givenname907 +description: description907 +userPassword: password907 +mail: uid907 +uidnumber: 907 +gidnumber: 907 +homeDirectory: /home/uid907 + +dn: cn=user908,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user908 +sn: user908 +uid: uid908 +givenname: givenname908 +description: description908 +userPassword: password908 +mail: uid908 +uidnumber: 908 +gidnumber: 908 +homeDirectory: /home/uid908 + +dn: cn=user909,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user909 +sn: user909 +uid: uid909 +givenname: givenname909 +description: description909 +userPassword: password909 +mail: uid909 +uidnumber: 909 +gidnumber: 909 +homeDirectory: /home/uid909 + +dn: cn=user910,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user910 +sn: user910 +uid: uid910 +givenname: givenname910 +description: description910 +userPassword: password910 +mail: uid910 +uidnumber: 910 +gidnumber: 910 +homeDirectory: /home/uid910 + +dn: cn=user911,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user911 +sn: user911 +uid: uid911 +givenname: givenname911 +description: description911 +userPassword: password911 +mail: uid911 +uidnumber: 911 +gidnumber: 911 +homeDirectory: /home/uid911 + +dn: cn=user912,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user912 +sn: user912 +uid: uid912 +givenname: givenname912 +description: description912 +userPassword: password912 +mail: uid912 +uidnumber: 912 +gidnumber: 912 +homeDirectory: /home/uid912 + +dn: cn=user913,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user913 +sn: user913 +uid: uid913 +givenname: givenname913 +description: description913 +userPassword: password913 +mail: uid913 +uidnumber: 913 +gidnumber: 913 +homeDirectory: /home/uid913 + +dn: cn=user914,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user914 +sn: user914 +uid: uid914 +givenname: givenname914 +description: description914 +userPassword: password914 +mail: 
uid914 +uidnumber: 914 +gidnumber: 914 +homeDirectory: /home/uid914 + +dn: cn=user915,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user915 +sn: user915 +uid: uid915 +givenname: givenname915 +description: description915 +userPassword: password915 +mail: uid915 +uidnumber: 915 +gidnumber: 915 +homeDirectory: /home/uid915 + +dn: cn=user916,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user916 +sn: user916 +uid: uid916 +givenname: givenname916 +description: description916 +userPassword: password916 +mail: uid916 +uidnumber: 916 +gidnumber: 916 +homeDirectory: /home/uid916 + +dn: cn=user917,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user917 +sn: user917 +uid: uid917 +givenname: givenname917 +description: description917 +userPassword: password917 +mail: uid917 +uidnumber: 917 +gidnumber: 917 +homeDirectory: /home/uid917 + +dn: cn=user918,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user918 +sn: user918 +uid: uid918 +givenname: givenname918 +description: description918 +userPassword: password918 +mail: uid918 +uidnumber: 918 +gidnumber: 918 +homeDirectory: /home/uid918 + +dn: cn=user919,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user919 +sn: user919 +uid: uid919 +givenname: givenname919 +description: description919 +userPassword: password919 +mail: uid919 +uidnumber: 919 +gidnumber: 919 +homeDirectory: /home/uid919 + +dn: cn=user920,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user920 +sn: user920 +uid: uid920 +givenname: givenname920 +description: description920 +userPassword: password920 +mail: uid920 +uidnumber: 920 +gidnumber: 920 +homeDirectory: /home/uid920 + +dn: cn=user921,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user921 +sn: user921 +uid: uid921 +givenname: givenname921 +description: description921 +userPassword: password921 +mail: uid921 +uidnumber: 921 +gidnumber: 921 +homeDirectory: /home/uid921 + +dn: cn=user922,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user922 +sn: user922 +uid: uid922 +givenname: givenname922 +description: description922 +userPassword: password922 +mail: uid922 +uidnumber: 922 +gidnumber: 922 +homeDirectory: /home/uid922 + +dn: cn=user923,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user923 +sn: user923 +uid: uid923 +givenname: givenname923 +description: description923 +userPassword: password923 +mail: uid923 +uidnumber: 923 +gidnumber: 923 +homeDirectory: /home/uid923 + +dn: cn=user924,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user924 +sn: user924 +uid: uid924 +givenname: givenname924 +description: description924 +userPassword: password924 +mail: uid924 +uidnumber: 924 +gidnumber: 924 +homeDirectory: /home/uid924 + +dn: cn=user925,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user925 +sn: user925 +uid: uid925 +givenname: givenname925 +description: description925 +userPassword: password925 +mail: uid925 +uidnumber: 925 +gidnumber: 925 +homeDirectory: /home/uid925 + +dn: cn=user926,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user926 +sn: user926 +uid: uid926 +givenname: givenname926 +description: description926 +userPassword: password926 +mail: uid926 +uidnumber: 926 +gidnumber: 926 +homeDirectory: /home/uid926 + +dn: cn=user927,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user927 +sn: user927 +uid: uid927 +givenname: givenname927 +description: description927 +userPassword: password927 +mail: uid927 +uidnumber: 927 +gidnumber: 927 +homeDirectory: /home/uid927 + +dn: cn=user928,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user928 +sn: user928 +uid: uid928 +givenname: givenname928 +description: description928 +userPassword: password928 +mail: uid928 +uidnumber: 928 +gidnumber: 928 +homeDirectory: /home/uid928 + +dn: cn=user929,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user929 +sn: user929 +uid: uid929 +givenname: givenname929 +description: description929 +userPassword: password929 +mail: uid929 +uidnumber: 929 +gidnumber: 929 +homeDirectory: /home/uid929 + +dn: cn=user930,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user930 +sn: user930 +uid: uid930 +givenname: givenname930 +description: description930 +userPassword: password930 +mail: uid930 +uidnumber: 930 +gidnumber: 930 +homeDirectory: /home/uid930 + +dn: cn=user931,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user931 +sn: user931 +uid: uid931 +givenname: givenname931 +description: description931 +userPassword: password931 +mail: uid931 +uidnumber: 931 +gidnumber: 931 +homeDirectory: /home/uid931 + +dn: cn=user932,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user932 +sn: user932 +uid: uid932 +givenname: givenname932 +description: description932 +userPassword: password932 +mail: uid932 +uidnumber: 932 +gidnumber: 932 +homeDirectory: /home/uid932 + +dn: cn=user933,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user933 +sn: user933 +uid: uid933 +givenname: givenname933 +description: description933 +userPassword: password933 +mail: uid933 +uidnumber: 933 +gidnumber: 933 +homeDirectory: /home/uid933 + 
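For reference, every entry in this generated LDIF follows the same posixAccount template: cn/sn userN, uid and mail uidN, uidnumber and gidnumber equal to N, and homeDirectory /home/uidN. A minimal sketch, not part of this patch, of a helper that would reproduce such a block (the function name and range are illustrative only):

def make_user_entries(start, end, suffix="dc=example,dc=com"):
    """Illustrative only: build LDIF text for userN entries like the ones in this file."""
    lines = []
    for n in range(start, end + 1):
        lines += ["dn: cn=user%d,ou=People,%s" % (n, suffix),
                  "objectClass: top",
                  "objectClass: person",
                  "objectClass: organizationalPerson",
                  "objectClass: inetOrgPerson",
                  "objectClass: posixAccount",
                  "cn: user%d" % n,
                  "sn: user%d" % n,
                  "uid: uid%d" % n,
                  "givenname: givenname%d" % n,
                  "description: description%d" % n,
                  "userPassword: password%d" % n,
                  "mail: uid%d" % n,
                  "uidnumber: %d" % n,
                  "gidnumber: %d" % n,
                  "homeDirectory: /home/uid%d" % n,
                  ""]
    return "\n".join(lines)

# Example: print(make_user_entries(905, 999))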
+dn: cn=user934,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user934 +sn: user934 +uid: uid934 +givenname: givenname934 +description: description934 +userPassword: password934 +mail: uid934 +uidnumber: 934 +gidnumber: 934 +homeDirectory: /home/uid934 + +dn: cn=user935,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user935 +sn: user935 +uid: uid935 +givenname: givenname935 +description: description935 +userPassword: password935 +mail: uid935 +uidnumber: 935 +gidnumber: 935 +homeDirectory: /home/uid935 + +dn: cn=user936,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user936 +sn: user936 +uid: uid936 +givenname: givenname936 +description: description936 +userPassword: password936 +mail: uid936 +uidnumber: 936 +gidnumber: 936 +homeDirectory: /home/uid936 + +dn: cn=user937,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user937 +sn: user937 +uid: uid937 +givenname: givenname937 +description: description937 +userPassword: password937 +mail: uid937 +uidnumber: 937 +gidnumber: 937 +homeDirectory: /home/uid937 + +dn: cn=user938,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user938 +sn: user938 +uid: uid938 +givenname: givenname938 +description: description938 +userPassword: password938 +mail: uid938 +uidnumber: 938 +gidnumber: 938 +homeDirectory: /home/uid938 + +dn: cn=user939,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user939 +sn: user939 +uid: uid939 +givenname: givenname939 +description: description939 +userPassword: password939 +mail: uid939 +uidnumber: 939 +gidnumber: 939 +homeDirectory: /home/uid939 + +dn: cn=user940,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user940 +sn: user940 +uid: uid940 +givenname: givenname940 +description: description940 +userPassword: password940 +mail: uid940 +uidnumber: 940 +gidnumber: 940 +homeDirectory: /home/uid940 + +dn: cn=user941,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user941 +sn: user941 +uid: uid941 +givenname: givenname941 +description: description941 +userPassword: password941 +mail: uid941 +uidnumber: 941 +gidnumber: 941 +homeDirectory: /home/uid941 + +dn: cn=user942,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user942 +sn: user942 +uid: uid942 +givenname: givenname942 +description: description942 +userPassword: password942 +mail: uid942 +uidnumber: 942 +gidnumber: 942 +homeDirectory: /home/uid942 + +dn: cn=user943,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user943 +sn: user943 +uid: uid943 +givenname: 
givenname943 +description: description943 +userPassword: password943 +mail: uid943 +uidnumber: 943 +gidnumber: 943 +homeDirectory: /home/uid943 + +dn: cn=user944,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user944 +sn: user944 +uid: uid944 +givenname: givenname944 +description: description944 +userPassword: password944 +mail: uid944 +uidnumber: 944 +gidnumber: 944 +homeDirectory: /home/uid944 + +dn: cn=user945,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user945 +sn: user945 +uid: uid945 +givenname: givenname945 +description: description945 +userPassword: password945 +mail: uid945 +uidnumber: 945 +gidnumber: 945 +homeDirectory: /home/uid945 + +dn: cn=user946,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user946 +sn: user946 +uid: uid946 +givenname: givenname946 +description: description946 +userPassword: password946 +mail: uid946 +uidnumber: 946 +gidnumber: 946 +homeDirectory: /home/uid946 + +dn: cn=user947,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user947 +sn: user947 +uid: uid947 +givenname: givenname947 +description: description947 +userPassword: password947 +mail: uid947 +uidnumber: 947 +gidnumber: 947 +homeDirectory: /home/uid947 + +dn: cn=user948,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user948 +sn: user948 +uid: uid948 +givenname: givenname948 +description: description948 +userPassword: password948 +mail: uid948 +uidnumber: 948 +gidnumber: 948 +homeDirectory: /home/uid948 + +dn: cn=user949,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user949 +sn: user949 +uid: uid949 +givenname: givenname949 +description: description949 +userPassword: password949 +mail: uid949 +uidnumber: 949 +gidnumber: 949 +homeDirectory: /home/uid949 + +dn: cn=user950,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user950 +sn: user950 +uid: uid950 +givenname: givenname950 +description: description950 +userPassword: password950 +mail: uid950 +uidnumber: 950 +gidnumber: 950 +homeDirectory: /home/uid950 + +dn: cn=user951,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user951 +sn: user951 +uid: uid951 +givenname: givenname951 +description: description951 +userPassword: password951 +mail: uid951 +uidnumber: 951 +gidnumber: 951 +homeDirectory: /home/uid951 + +dn: cn=user952,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user952 +sn: user952 +uid: uid952 +givenname: givenname952 +description: description952 +userPassword: password952 +mail: uid952 +uidnumber: 952 +gidnumber: 952 +homeDirectory: /home/uid952 + +dn: cn=user953,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user953 +sn: user953 +uid: uid953 +givenname: givenname953 +description: description953 +userPassword: password953 +mail: uid953 +uidnumber: 953 +gidnumber: 953 +homeDirectory: /home/uid953 + +dn: cn=user954,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user954 +sn: user954 +uid: uid954 +givenname: givenname954 +description: description954 +userPassword: password954 +mail: uid954 +uidnumber: 954 +gidnumber: 954 +homeDirectory: /home/uid954 + +dn: cn=user955,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user955 +sn: user955 +uid: uid955 +givenname: givenname955 +description: description955 +userPassword: password955 +mail: uid955 +uidnumber: 955 +gidnumber: 955 +homeDirectory: /home/uid955 + +dn: cn=user956,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user956 +sn: user956 +uid: uid956 +givenname: givenname956 +description: description956 +userPassword: password956 +mail: uid956 +uidnumber: 956 +gidnumber: 956 +homeDirectory: /home/uid956 + +dn: cn=user957,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user957 +sn: user957 +uid: uid957 +givenname: givenname957 +description: description957 +userPassword: password957 +mail: uid957 +uidnumber: 957 +gidnumber: 957 +homeDirectory: /home/uid957 + +dn: cn=user958,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user958 +sn: user958 +uid: uid958 +givenname: givenname958 +description: description958 +userPassword: password958 +mail: uid958 +uidnumber: 958 +gidnumber: 958 +homeDirectory: /home/uid958 + +dn: cn=user959,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user959 +sn: user959 +uid: uid959 +givenname: givenname959 +description: description959 +userPassword: password959 +mail: uid959 +uidnumber: 959 +gidnumber: 959 +homeDirectory: /home/uid959 + +dn: cn=user960,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user960 +sn: user960 +uid: uid960 +givenname: givenname960 +description: description960 +userPassword: password960 +mail: uid960 +uidnumber: 960 +gidnumber: 960 +homeDirectory: /home/uid960 + +dn: cn=user961,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user961 +sn: user961 +uid: uid961 +givenname: givenname961 +description: description961 +userPassword: password961 +mail: uid961 +uidnumber: 961 +gidnumber: 961 +homeDirectory: /home/uid961 + +dn: cn=user962,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user962 +sn: user962 +uid: uid962 +givenname: givenname962 +description: description962 +userPassword: password962 +mail: 
uid962 +uidnumber: 962 +gidnumber: 962 +homeDirectory: /home/uid962 + +dn: cn=user963,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user963 +sn: user963 +uid: uid963 +givenname: givenname963 +description: description963 +userPassword: password963 +mail: uid963 +uidnumber: 963 +gidnumber: 963 +homeDirectory: /home/uid963 + +dn: cn=user964,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user964 +sn: user964 +uid: uid964 +givenname: givenname964 +description: description964 +userPassword: password964 +mail: uid964 +uidnumber: 964 +gidnumber: 964 +homeDirectory: /home/uid964 + +dn: cn=user965,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user965 +sn: user965 +uid: uid965 +givenname: givenname965 +description: description965 +userPassword: password965 +mail: uid965 +uidnumber: 965 +gidnumber: 965 +homeDirectory: /home/uid965 + +dn: cn=user966,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user966 +sn: user966 +uid: uid966 +givenname: givenname966 +description: description966 +userPassword: password966 +mail: uid966 +uidnumber: 966 +gidnumber: 966 +homeDirectory: /home/uid966 + +dn: cn=user967,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user967 +sn: user967 +uid: uid967 +givenname: givenname967 +description: description967 +userPassword: password967 +mail: uid967 +uidnumber: 967 +gidnumber: 967 +homeDirectory: /home/uid967 + +dn: cn=user968,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user968 +sn: user968 +uid: uid968 +givenname: givenname968 +description: description968 +userPassword: password968 +mail: uid968 +uidnumber: 968 +gidnumber: 968 +homeDirectory: /home/uid968 + +dn: cn=user969,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user969 +sn: user969 +uid: uid969 +givenname: givenname969 +description: description969 +userPassword: password969 +mail: uid969 +uidnumber: 969 +gidnumber: 969 +homeDirectory: /home/uid969 + +dn: cn=user970,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user970 +sn: user970 +uid: uid970 +givenname: givenname970 +description: description970 +userPassword: password970 +mail: uid970 +uidnumber: 970 +gidnumber: 970 +homeDirectory: /home/uid970 + +dn: cn=user971,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user971 +sn: user971 +uid: uid971 +givenname: givenname971 +description: description971 +userPassword: password971 +mail: uid971 +uidnumber: 971 +gidnumber: 971 +homeDirectory: /home/uid971 + +dn: cn=user972,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user972 +sn: user972 +uid: uid972 +givenname: givenname972 +description: description972 +userPassword: password972 +mail: uid972 +uidnumber: 972 +gidnumber: 972 +homeDirectory: /home/uid972 + +dn: cn=user973,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user973 +sn: user973 +uid: uid973 +givenname: givenname973 +description: description973 +userPassword: password973 +mail: uid973 +uidnumber: 973 +gidnumber: 973 +homeDirectory: /home/uid973 + +dn: cn=user974,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user974 +sn: user974 +uid: uid974 +givenname: givenname974 +description: description974 +userPassword: password974 +mail: uid974 +uidnumber: 974 +gidnumber: 974 +homeDirectory: /home/uid974 + +dn: cn=user975,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user975 +sn: user975 +uid: uid975 +givenname: givenname975 +description: description975 +userPassword: password975 +mail: uid975 +uidnumber: 975 +gidnumber: 975 +homeDirectory: /home/uid975 + +dn: cn=user976,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user976 +sn: user976 +uid: uid976 +givenname: givenname976 +description: description976 +userPassword: password976 +mail: uid976 +uidnumber: 976 +gidnumber: 976 +homeDirectory: /home/uid976 + +dn: cn=user977,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user977 +sn: user977 +uid: uid977 +givenname: givenname977 +description: description977 +userPassword: password977 +mail: uid977 +uidnumber: 977 +gidnumber: 977 +homeDirectory: /home/uid977 + +dn: cn=user978,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user978 +sn: user978 +uid: uid978 +givenname: givenname978 +description: description978 +userPassword: password978 +mail: uid978 +uidnumber: 978 +gidnumber: 978 +homeDirectory: /home/uid978 + +dn: cn=user979,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user979 +sn: user979 +uid: uid979 +givenname: givenname979 +description: description979 +userPassword: password979 +mail: uid979 +uidnumber: 979 +gidnumber: 979 +homeDirectory: /home/uid979 + +dn: cn=user980,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user980 +sn: user980 +uid: uid980 +givenname: givenname980 +description: description980 +userPassword: password980 +mail: uid980 +uidnumber: 980 +gidnumber: 980 +homeDirectory: /home/uid980 + +dn: cn=user981,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user981 +sn: user981 +uid: uid981 +givenname: givenname981 +description: description981 +userPassword: password981 +mail: uid981 +uidnumber: 981 +gidnumber: 981 +homeDirectory: /home/uid981 + 
+dn: cn=user982,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user982 +sn: user982 +uid: uid982 +givenname: givenname982 +description: description982 +userPassword: password982 +mail: uid982 +uidnumber: 982 +gidnumber: 982 +homeDirectory: /home/uid982 + +dn: cn=user983,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user983 +sn: user983 +uid: uid983 +givenname: givenname983 +description: description983 +userPassword: password983 +mail: uid983 +uidnumber: 983 +gidnumber: 983 +homeDirectory: /home/uid983 + +dn: cn=user984,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user984 +sn: user984 +uid: uid984 +givenname: givenname984 +description: description984 +userPassword: password984 +mail: uid984 +uidnumber: 984 +gidnumber: 984 +homeDirectory: /home/uid984 + +dn: cn=user985,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user985 +sn: user985 +uid: uid985 +givenname: givenname985 +description: description985 +userPassword: password985 +mail: uid985 +uidnumber: 985 +gidnumber: 985 +homeDirectory: /home/uid985 + +dn: cn=user986,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user986 +sn: user986 +uid: uid986 +givenname: givenname986 +description: description986 +userPassword: password986 +mail: uid986 +uidnumber: 986 +gidnumber: 986 +homeDirectory: /home/uid986 + +dn: cn=user987,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user987 +sn: user987 +uid: uid987 +givenname: givenname987 +description: description987 +userPassword: password987 +mail: uid987 +uidnumber: 987 +gidnumber: 987 +homeDirectory: /home/uid987 + +dn: cn=user988,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user988 +sn: user988 +uid: uid988 +givenname: givenname988 +description: description988 +userPassword: password988 +mail: uid988 +uidnumber: 988 +gidnumber: 988 +homeDirectory: /home/uid988 + +dn: cn=user989,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user989 +sn: user989 +uid: uid989 +givenname: givenname989 +description: description989 +userPassword: password989 +mail: uid989 +uidnumber: 989 +gidnumber: 989 +homeDirectory: /home/uid989 + +dn: cn=user990,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user990 +sn: user990 +uid: uid990 +givenname: givenname990 +description: description990 +userPassword: password990 +mail: uid990 +uidnumber: 990 +gidnumber: 990 +homeDirectory: /home/uid990 + +dn: cn=user991,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user991 +sn: user991 +uid: uid991 +givenname: 
givenname991 +description: description991 +userPassword: password991 +mail: uid991 +uidnumber: 991 +gidnumber: 991 +homeDirectory: /home/uid991 + +dn: cn=user992,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user992 +sn: user992 +uid: uid992 +givenname: givenname992 +description: description992 +userPassword: password992 +mail: uid992 +uidnumber: 992 +gidnumber: 992 +homeDirectory: /home/uid992 + +dn: cn=user993,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user993 +sn: user993 +uid: uid993 +givenname: givenname993 +description: description993 +userPassword: password993 +mail: uid993 +uidnumber: 993 +gidnumber: 993 +homeDirectory: /home/uid993 + +dn: cn=user994,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user994 +sn: user994 +uid: uid994 +givenname: givenname994 +description: description994 +userPassword: password994 +mail: uid994 +uidnumber: 994 +gidnumber: 994 +homeDirectory: /home/uid994 + +dn: cn=user995,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user995 +sn: user995 +uid: uid995 +givenname: givenname995 +description: description995 +userPassword: password995 +mail: uid995 +uidnumber: 995 +gidnumber: 995 +homeDirectory: /home/uid995 + +dn: cn=user996,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user996 +sn: user996 +uid: uid996 +givenname: givenname996 +description: description996 +userPassword: password996 +mail: uid996 +uidnumber: 996 +gidnumber: 996 +homeDirectory: /home/uid996 + +dn: cn=user997,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user997 +sn: user997 +uid: uid997 +givenname: givenname997 +description: description997 +userPassword: password997 +mail: uid997 +uidnumber: 997 +gidnumber: 997 +homeDirectory: /home/uid997 + +dn: cn=user998,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user998 +sn: user998 +uid: uid998 +givenname: givenname998 +description: description998 +userPassword: password998 +mail: uid998 +uidnumber: 998 +gidnumber: 998 +homeDirectory: /home/uid998 + +dn: cn=user999,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user999 +sn: user999 +uid: uid999 +givenname: givenname999 +description: description999 +userPassword: password999 +mail: uid999 +uidnumber: 999 +gidnumber: 999 +homeDirectory: /home/uid999 + diff --git a/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py b/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py new file mode 100644 index 0000000..36021e2 --- /dev/null +++ b/dirsrvtests/tests/suites/acct_usability_plugin/acct_usability_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_acct_usability_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_acct_usability_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_acct_usability_final(topology): + topology.standalone.delete() + log.info('acct_usability test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_acct_usability_init(topo) + test_acct_usability_(topo) + test_acct_usability_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py b/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py new file mode 100644 index 0000000..b7db352 --- /dev/null +++ b/dirsrvtests/tests/suites/acctpolicy_plugin/acctpolicy_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
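Both plugin suites added here (acct_usability and acctpolicy) ship the same skeleton: a module-scoped standalone topology fixture plus empty init/placeholder/final tests to be filled in later. A minimal sketch, not part of this patch, of how such a placeholder is typically fleshed out against that fixture (the entry DN and attribute values are illustrative only):

import ldap
from lib389 import Entry
from lib389._constants import DEFAULT_SUFFIX

def test_acct_usability_example(topology):
    """Illustrative only: add an entry through the standalone instance and read it back."""
    USER_DN = 'uid=example_user,%s' % DEFAULT_SUFFIX  # hypothetical test entry
    topology.standalone.add_s(Entry((USER_DN, {
        'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
        'uid': 'example_user',
        'cn': 'example_user',
        'sn': 'example_user',
        'userpassword': 'password'})))
    entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                           '(uid=example_user)')
    assert len(entries) == 1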
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_acctpolicy_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_acctpolicy_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_acctpolicy_final(topology): + topology.standalone.delete() + log.info('acctpolicy test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_acctpolicy_init(topo) + test_acctpolicy_(topo) + test_acctpolicy_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/acl/acl_test.py b/dirsrvtests/tests/suites/acl/acl_test.py new file mode 100644 index 0000000..422a1ec --- /dev/null +++ b/dirsrvtests/tests/suites/acl/acl_test.py @@ -0,0 +1,1059 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from ldap.controls.simple import GetEffectiveRightsControl + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX + +STAGING_CN = "staged user" +PRODUCTION_CN = "accounts" +EXCEPT_CN = "excepts" + +STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) +PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) +PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) + +STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) +PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) +BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) +BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) + +BIND_CN = "bind_entry" +BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) +BIND_PW = "password" + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + +SRC_ENTRY_CN = "tuser" +EXT_RDN = "01" +DST_ENTRY_CN = SRC_ENTRY_CN + EXT_RDN + +SRC_ENTRY_DN = "cn=%s,%s" % (SRC_ENTRY_CN, SUFFIX) +DST_ENTRY_DN = "cn=%s,%s" % (DST_ENTRY_CN, SUFFIX) + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + """This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. 
+ """ + + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, + role=REPLICAROLE_MASTER, + replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, + role=REPLICAROLE_MASTER, + replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, + host=master2.host, + port=master2.port, + properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, + host=master1.host, + port=master1.port, + properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + def fin(): + master1.delete() + master2.delete() + request.addfinalizer(fin) + + # clear the tmp directory + master1.clearTmpDir(__file__) + + # Here we have two instances master and consumer + # with replication working. 
+ return TopologyMaster1Master2(master1, master2) + + +def add_attr(topology, attr_name): + """Adds attribute to the schema""" + + ATTR_VALUE = """(NAME '%s' \ + DESC 'Attribute filteri-Multi-Valued' \ + SYNTAX 1.3.6.1.4.1.1466.115.121.1.27)""" % attr_name + mod = [(ldap.MOD_ADD, 'attributeTypes', ATTR_VALUE)] + + try: + topology.standalone.modify_s(DN_SCHEMA, mod) + except ldap.LDAPError as e: + log.fatal('Failed to add attr (%s): error (%s)' % (attr_name, + e.message['desc'])) + assert False + + +@pytest.fixture(params=["lang-ja", "binary", "phonetic"]) +def aci_with_attr_subtype(request, topology): + """Adds and deletes an ACI in the DEFAULT_SUFFIX""" + + TARGET_ATTR = 'protectedOperation' + USER_ATTR = 'allowedToPerform' + SUBTYPE = request.param + + log.info("========Executing test with '%s' subtype========" % SUBTYPE) + log.info(" Add a target attribute") + add_attr(topology, TARGET_ATTR) + + log.info(" Add a user attribute") + add_attr(topology, USER_ATTR) + + ACI_TARGET = '(targetattr=%s;%s)' % (TARGET_ATTR, SUBTYPE) + ACI_ALLOW = '(version 3.0; acl "test aci for subtypes"; allow (read) ' + ACI_SUBJECT = 'userattr = "%s;%s#GROUPDN";)' % (USER_ATTR, SUBTYPE) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + log.info(" Add an ACI with attribute subtype") + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, mod) + except ldap.LDAPError as e: + log.fatal('Failed to add ACI: error (%s)' % (e.message['desc'])) + assert False + + def fin(): + log.info(" Finally, delete an ACI with the '%s' subtype" % + SUBTYPE) + mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, mod) + except ldap.LDAPError as e: + log.fatal('Failed to delete ACI: error (%s)' % (e.message['desc'])) + assert False + request.addfinalizer(fin) + + return ACI_BODY + + +def test_aci_attr_subtype_targetattr(topology, aci_with_attr_subtype): + """Checks, that ACIs allow attribute subtypes in the targetattr keyword + + Test description: + 1. Define two attributes in the schema + - first will be a targetattr + - second will be a userattr + 2. 
Add an ACI with an attribute subtype + - or language subtype + - or binary subtype + - or pronunciation subtype + """ + + log.info(" Search for the added attribute") + try: + entries = topology.master1.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_BASE, + '(objectclass=*)', ['aci']) + entry = str(entries[0]) + assert aci_with_attr_subtype in entry + log.info(" The added attribute was found") + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +def _bind_manager(topology): + topology.master1.log.info("Bind as %s " % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + + +def _bind_normal(topology): + # bind as bind_entry + topology.master1.log.info("Bind as %s" % BIND_DN) + topology.master1.simple_bind_s(BIND_DN, BIND_PW) + + +def _moddn_aci_deny_tree(topology, mod_type=None, + target_from=STAGING_DN, target_to=PROD_EXCEPT_DN): + """It denies the access moddn_to in cn=except,cn=accounts,SUFFIX""" + + assert mod_type is not None + + ACI_TARGET_FROM = "" + ACI_TARGET_TO = "" + if target_from: + ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) + if target_to: + ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) + + ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT + mod = [(mod_type, 'aci', ACI_BODY)] + #topology.master1.modify_s(SUFFIX, mod) + topology.master1.log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN) + topology.master1.modify_s(PROD_EXCEPT_DN, mod) + + +def _write_aci_staging(topology, mod_type=None): + assert mod_type is not None + + ACI_TARGET = "(targetattr= \"cn\")(target=\"ldap:///cn=*,%s\")" % STAGING_DN + ACI_ALLOW = "(version 3.0; acl \"write staging entries\"; allow (write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(mod_type, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + + +def _write_aci_production(topology, mod_type=None): + assert mod_type is not None + + ACI_TARGET = "(targetattr= \"cn\")(target=\"ldap:///cn=*,%s\")" % PRODUCTION_DN + ACI_ALLOW = "(version 3.0; acl \"write production entries\"; allow (write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(mod_type, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + + +def _moddn_aci_staging_to_production(topology, mod_type=None, + target_from=STAGING_DN, target_to=PRODUCTION_DN): + assert mod_type is not None + + + ACI_TARGET_FROM = "" + ACI_TARGET_TO = "" + if target_from: + ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) + if target_to: + ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) + + ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT + mod = [(mod_type, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + + _write_aci_staging(topology, mod_type=mod_type) + + +def _moddn_aci_from_production_to_staging(topology, mod_type=None): + assert mod_type is not None + + ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % ( + PRODUCTION_DN, STAGING_DN) + ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = 
ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(mod_type, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + + _write_aci_production(topology, mod_type=mod_type) + + +@pytest.fixture(scope="module") +def moddn_setup(topology): + """Creates + - a staging DIT + - a production DIT + - add accounts in staging DIT + - enable ACL logging (commented for performance reason) + """ + + topology.master1.log.info("\n\n######## INITIALIZATION ########\n") + + # entry used to bind with + topology.master1.log.info("Add %s" % BIND_DN) + topology.master1.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_CN, + 'cn': BIND_CN, + 'userpassword': BIND_PW}))) + + # DIT for staging + topology.master1.log.info("Add %s" % STAGING_DN) + topology.master1.add_s(Entry((STAGING_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': STAGING_CN, + 'description': "staging DIT"}))) + + # DIT for production + topology.master1.log.info("Add %s" % PRODUCTION_DN) + topology.master1.add_s(Entry((PRODUCTION_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': PRODUCTION_CN, + 'description': "production DIT"}))) + + # DIT for production/except + topology.master1.log.info("Add %s" % PROD_EXCEPT_DN) + topology.master1.add_s(Entry((PROD_EXCEPT_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': EXCEPT_CN, + 'description': "production except DIT"}))) + + # enable acl error logging + #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] + #topology.master1.modify_s(DN_CONFIG, mod) + #topology.master2.modify_s(DN_CONFIG, mod) + + # add dummy entries in the staging DIT + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_mode_default_add_deny(topology, moddn_setup): + """This test case checks + that the ADD operation fails (no ADD aci on production) + """ + + topology.master1.log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n") + + _bind_normal(topology) + + # + # First try to add an entry in production => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to add %s" % PRODUCTION_DN) + name = "%s%d" % (NEW_ACCOUNT, 0) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, PRODUCTION_DN), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + assert 0 # this is an error, we should not be allowed to add an entry in production + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + +def test_mode_default_delete_deny(topology, moddn_setup): + """This test case checks + that the DEL operation fails (no 'delete' aci on production) + """ + + topology.master1.log.info("\n\n######## DELETE (should fail) ########\n") + + _bind_normal(topology) + # + # Second try to delete an entry in staging => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to delete %s" % STAGING_DN) + name = "%s%d" % (NEW_ACCOUNT, 0) + topology.master1.delete_s("cn=%s,%s" % (name, STAGING_DN)) + assert 0 # this is an error, we should not be allowed to add an entry in production + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + +@pytest.mark.parametrize("index,tfrom,tto,failure", + [(0, STAGING_DN, PRODUCTION_DN, False), + (1, STAGING_DN, PRODUCTION_DN, False), + (2, STAGING_DN, 
BAD_PRODUCTION_PATTERN, True), + (3, STAGING_PATTERN, PRODUCTION_DN, False), + (4, BAD_STAGING_PATTERN, PRODUCTION_DN, True), + (5, STAGING_PATTERN, PRODUCTION_PATTERN, False), + (6, None, PRODUCTION_PATTERN, False), + (7, STAGING_PATTERN, None, False), + (8, None, None, False)]) +def test_moddn_staging_prod(topology, moddn_setup, + index, tfrom, tto, failure): + """This test case MOVE entry NEW_ACCOUNT0 from staging to prod + target_to/target_from: equality filter + """ + + topology.master1.log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index) + _bind_normal(topology) + + old_rdn = "cn=%s%s" % (NEW_ACCOUNT, index) + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + + # successfull MOD with the ACI + topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=tfrom, target_to=tto) + _bind_normal(topology) + + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + if failure: + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successfull MOD with the both ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=tfrom, target_to=tto) + _bind_normal(topology) + + +def test_moddn_staging_prod_9(topology, moddn_setup): + """This test case disable the 'moddn' right so a MODDN requires a 'add' right + to be successfull. + It fails to MOVE entry NEW_ACCOUNT9 from staging to prod. + Add a 'add' right to prod. + Then it succeeds to MOVE NEW_ACCOUNT9 from staging to prod. + + Then enable the 'moddn' right so a MODDN requires a 'moddn' right + It fails to MOVE entry NEW_ACCOUNT10 from staging to prod. + Add a 'moddn' right to prod. + Then it succeeds to MOVE NEW_ACCOUNT10 from staging to prod. 
+ """ + + topology.master1.log.info("\n\n######## MOVE staging -> Prod (9) ########\n") + + _bind_normal(topology) + old_rdn = "cn=%s9" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + ############# + # Now do tests with no support of moddn aci + ############# + topology.master1.log.info("Disable the moddn right") + _bind_manager(topology) + mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')] + topology.master1.modify_s(DN_CONFIG, mod) + + # Add the moddn aci that will not be evaluated because of the config flag + topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + # It will fail because it will test the ADD right + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # remove the moddn aci + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + # + # add the 'add' right to the production DN + # Then do a successfull moddn + # + ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_ALLOW + ACI_SUBJECT + + _bind_manager(topology) + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.master1.modify_s(PRODUCTION_DN, mod) + _write_aci_staging(topology, mod_type=ldap.MOD_ADD) + _bind_normal(topology) + + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + + _bind_manager(topology) + mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] + topology.master1.modify_s(PRODUCTION_DN, mod) + _write_aci_staging(topology, mod_type=ldap.MOD_DELETE) + _bind_normal(topology) + + ############# + # Now do tests with support of moddn aci + ############# + topology.master1.log.info("Enable the moddn right") + _bind_manager(topology) + mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'on')] + topology.master1.modify_s(DN_CONFIG, mod) + + topology.master1.log.info("\n\n######## MOVE staging -> Prod (10) ########\n") + + _bind_normal(topology) + old_rdn = "cn=%s10" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to MODDN %s 
-> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # + # add the 'add' right to the production DN + # Then do a failing moddn + # + ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_ALLOW + ACI_SUBJECT + + _bind_manager(topology) + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.master1.modify_s(PRODUCTION_DN, mod) + _write_aci_staging(topology, mod_type=ldap.MOD_ADD) + _bind_normal(topology) + + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + _bind_manager(topology) + mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] + topology.master1.modify_s(PRODUCTION_DN, mod) + _write_aci_staging(topology, mod_type=ldap.MOD_DELETE) + _bind_normal(topology) + + # Add the moddn aci that will be evaluated because of the config flag + topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # remove the moddn aci + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + +def test_moddn_prod_staging(topology, moddn_setup): + """This test checks that we can move ACCOUNT11 from staging to prod + but not move back ACCOUNT11 from prod to staging + """ + + topology.master1.log.info("\n\n######## MOVE staging -> Prod (11) ########\n") + + _bind_normal(topology) + + old_rdn = "cn=%s11" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successfull MOD with the ACI + topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + topology.master1.log.info("Try to MODDN %s -> %s,%s" % 
(old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # Now check we can not move back the entry to staging + old_rdn = "cn=%s11" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN) + new_rdn = old_rdn + new_superior = STAGING_DN + + # add the write right because we want to check the moddn + _bind_manager(topology) + _write_aci_production(topology, mod_type=ldap.MOD_ADD) + _bind_normal(topology) + + try: + topology.master1.log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + _bind_manager(topology) + _write_aci_production(topology, mod_type=ldap.MOD_DELETE) + _bind_normal(topology) + + # successfull MOD with the both ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + +def test_check_repl_M2_to_M1(topology, moddn_setup): + """Checks that replication is still working M2->M1, using ACCOUNT12""" + + topology.master1.log.info("Bind as %s (M2)" % DN_DM) + topology.master2.simple_bind_s(DN_DM, PASSWORD) + + rdn = "cn=%s12" % NEW_ACCOUNT + dn = "%s,%s" % (rdn, STAGING_DN) + + # First wait for the ACCOUNT19 entry being replicated on M2 + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + + attribute = 'description' + tested_value = 'Hello world' + mod = [(ldap.MOD_ADD, attribute, tested_value)] + topology.master1.log.info("Update (M2) %s (%s)" % (dn, attribute)) + topology.master2.modify_s(dn, mod) + + loop = 0 + while loop <= 10: + ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent is not None + if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value): + break + + time.sleep(1) + loop += 1 + assert loop < 10 + topology.master1.log.info("Update %s (%s) replicated on M1" % (dn, attribute)) + + +def test_moddn_staging_prod_except(topology, moddn_setup): + """This test case MOVE entry NEW_ACCOUNT13 from staging to prod + but fails to move entry NEW_ACCOUNT14 from staging to prod_except + """ + + topology.master1.log.info("\n\n######## MOVE staging -> Prod (13) ########\n") + _bind_normal(topology) + + old_rdn = "cn=%s13" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successfull MOD with the ACI + topology.master1.log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology) + 
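Editor's note: the try / rename_s / assert 0 / except AssertionError pattern repeated throughout these MODDN tests expresses "this rename must be refused" rather indirectly. A minimal sketch of the same check written with pytest.raises is shown below; the helper name _expect_moddn_refused is hypothetical and not part of lib389 or of this patch.

    def _expect_moddn_refused(topology, old_dn, new_rdn, new_superior):
        # The server must refuse the MODDN with INSUFFICIENT_ACCESS.
        topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
        with pytest.raises(ldap.INSUFFICIENT_ACCESS):
            topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior)

Each "rename without the appropriate ACI" block above would then reduce to a single call to this helper.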
_moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_ADD) + _bind_normal(topology) + + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # + # Now try to move an entry under except + # + topology.master1.log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n") + old_rdn = "cn=%s14" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PROD_EXCEPT_DN + try: + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology.master1.log.info("Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successfull MOD with the both ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_DELETE) + _bind_normal(topology) + + +def test_mode_default_ger_no_moddn(topology, moddn_setup): + topology.master1.log.info("\n\n######## mode moddn_aci : GER no moddn ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) + msg_id = topology.master1.search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology.master1.log.info("######## entryLevelRights: %r" % value) + assert 'n' not in value + + +def test_mode_default_ger_with_moddn(topology, moddn_setup): + """This test case adds the moddn aci and check ger contains 'n'""" + + topology.master1.log.info("\n\n######## mode moddn_aci: GER with moddn ########\n") + + # successfull MOD with the ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) + msg_id = topology.master1.search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology.master1.log.info("######## entryLevelRights: %r" % value) + assert 'n' in value + + # successfull MOD with the both ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + +def test_mode_switch_default_to_legacy(topology, moddn_setup): + """This test switch the server from default mode to legacy""" + + topology.master1.log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology) + mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')] + topology.master1.modify_s(DN_CONFIG, mod) + + +def 
test_mode_legacy_ger_no_moddn1(topology, moddn_setup): + topology.master1.log.info("\n\n######## mode legacy 1: GER no moddn ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) + msg_id = topology.master1.search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology.master1.log.info("######## entryLevelRights: %r" % value) + assert 'n' not in value + + +def test_mode_legacy_ger_no_moddn2(topology, moddn_setup): + topology.master1.log.info("\n\n######## mode legacy 2: GER no moddn ########\n") + # successfull MOD with the ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) + msg_id = topology.master1.search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology.master1.log.info("######## entryLevelRights: %r" % value) + assert 'n' not in value + + # successfull MOD with the both ACI + _bind_manager(topology) + _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology) + + +def test_mode_legacy_ger_with_moddn(topology, moddn_setup): + topology.master1.log.info("\n\n######## mode legacy : GER with moddn ########\n") + + # being allowed to read/write the RDN attribute use to allow the RDN + ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"cn\")" % (PRODUCTION_DN) + ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + # successfull MOD with the ACI + _bind_manager(topology) + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + _bind_normal(topology) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) + msg_id = topology.master1.search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology.master1.log.info("######## entryLevelRights: %r" % value) + assert 'n' in value + + # successfull MOD with the both ACI + _bind_manager(topology) + mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + #_bind_normal(topology) + + +@pytest.fixture(scope="module") +def rdn_write_setup(topology): + topology.master1.log.info("\n\n######## Add entry tuser ########\n") + topology.master1.add_s(Entry((SRC_ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': SRC_ENTRY_CN, + 'cn': SRC_ENTRY_CN}))) + + +def test_rdn_write_get_ger(topology, rdn_write_setup): + ANONYMOUS_DN = "" + topology.master1.log.info("\n\n######## GER rights for anonymous ########\n") + request_ctrl = 
GetEffectiveRightsControl(criticality=True, + authzId="dn:" + ANONYMOUS_DN) + msg_id = topology.master1.search_ext(SUFFIX, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + value = '' + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + for value in attrs['entryLevelRights']: + topology.master1.log.info("######## entryLevelRights: %r" % value) + assert 'n' not in value + + +def test_rdn_write_modrdn_anonymous(topology, rdn_write_setup): + ANONYMOUS_DN = "" + topology.master1.close() + topology.master1.binddn = ANONYMOUS_DN + topology.master1.open() + msg_id = topology.master1.search_ext("", ldap.SCOPE_BASE, "objectclass=*") + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + for attr in attrs: + topology.master1.log.info("######## %r: %r" % (attr, attrs[attr])) + + try: + topology.master1.rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True) + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + isinstance(e, ldap.INSUFFICIENT_ACCESS) + + try: + topology.master1.getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*") + assert False + except Exception as e: + topology.master1.log.info("The entry was not renamed (expected)") + isinstance(e, ldap.NO_SUCH_OBJECT) + + _bind_manager(topology) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py b/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py new file mode 100644 index 0000000..7d14a76 --- /dev/null +++ b/dirsrvtests/tests/suites/attr_encryption/attr_encrypt_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
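Editor's note: the GER checks in this file (test_mode_default_ger_*, test_mode_legacy_ger_* and test_rdn_write_get_ger above) all repeat the same GetEffectiveRightsControl / search_ext / result3 sequence and then inspect entryLevelRights. A minimal sketch of that sequence as a helper, reusing only calls already present in this file; the name _entry_level_rights is hypothetical.

    def _entry_level_rights(conn, authz_dn, base_dn):
        # Request the effective rights of authz_dn on base_dn and return
        # the entryLevelRights value of each entry in the result.
        request_ctrl = GetEffectiveRightsControl(criticality=True,
                                                 authzId="dn: " + authz_dn)
        msg_id = conn.search_ext(base_dn, ldap.SCOPE_SUBTREE, "objectclass=*",
                                 serverctrls=[request_ctrl])
        rtype, rdata, rmsgid, response_ctrl = conn.result3(msg_id)
        return [attrs['entryLevelRights'][0] for dn, attrs in rdata]

Each test would then only assert whether 'n' (the rename right) appears in the returned values.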
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_attr_encrypt_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_attr_encrypt_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_attr_encrypt_final(topology): + topology.standalone.delete() + log.info('attr_encrypt test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_attr_encrypt_init(topo) + test_attr_encrypt_(topo) + test_attr_encrypt_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py new file mode 100644 index 0000000..06e7425 --- /dev/null +++ b/dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py @@ -0,0 +1,248 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_attr_uniqueness_init(topology): + ''' + Enable dynamic plugins - makes things easier + ''' + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError as e: + ldap.fatal('Failed to enable dynamic plugin!' 
+ e.message['desc']) + assert False + + topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + + +def test_attr_uniqueness(topology): + log.info('Running test_attr_uniqueness...') + + # + # Configure plugin + # + try: + topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')]) + + except ldap.LDAPError as e: + log.fatal('test_attr_uniqueness: Failed to configure plugin for "uid": error ' + e.message['desc']) + assert False + + # Add an entry + try: + topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'mail': 'user1@example.com', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_attr_uniqueness: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + # Add an entry with a duplicate "uid" + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'uid': 'user1', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.fatal('test_attr_uniqueness: Adding of 2nd entry(uid) incorrectly succeeded') + assert False + + # + # Change config to use "mail" instead of "uid" + # + try: + topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')]) + + except ldap.LDAPError as e: + log.fatal('test_attr_uniqueness: Failed to configure plugin for "mail": error ' + e.message['desc']) + assert False + + # + # Test plugin - Add an entry, that has a duplicate "mail" value + # + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.fatal('test_attr_uniqueness: Adding of 2nd entry(mail) incorrectly succeeded') + assert False + + # + # Reconfigure plugin for mail and mailAlternateAddress + # + try: + topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'), + (ldap.MOD_ADD, 'uniqueness-attribute-name', + 'mailAlternateAddress')]) + except ldap.LDAPError as e: + log.error('test_attr_uniqueness: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + + e.message['desc']) + assert False + + # + # Test plugin - Add an entry, that has a duplicate "mail" value + # + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attr_uniqueness: Adding of 3rd entry(mail) incorrectly succeeded') + assert False + + # + # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value + # + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attr_uniqueness: Adding of 4th entry(mailAlternateAddress) incorrectly succeeded') + assert 
False + + # + # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress + # + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attr_uniqueness: Adding of 5th entry(mailAlternateAddress) incorrectly succeeded') + assert False + + # + # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail + # + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attr_uniqueness: Adding of 6th entry(mail) incorrectly succeeded') + assert False + + # + # Cleanup + # + try: + topology.standalone.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_attr_uniqueness: Failed to delete test entry: ' + e.message['desc']) + assert False + + log.info('test_attr_uniqueness: PASS\n') + + +def test_attr_uniqueness_final(topology): + topology.standalone.delete() + log.info('attr_uniqueness test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_attr_uniqueness_init(topo) + test_attr_uniqueness(topo) + test_attr_uniqueness_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_test.py new file mode 100644 index 0000000..3e5f020 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/automember_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
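Editor's note: the CONSTRAINT_VIOLATION checks in test_attr_uniqueness above all have the same shape: point uniqueness-attribute-name at one or more attributes, then add a second entry that collides on a watched value. A compact sketch of that shape, assuming the USER2_DN template used in this file; the helper name _expect_uniqueness_violation is hypothetical.

    def _expect_uniqueness_violation(topology, extra_attrs):
        # Adding USER2_DN with a colliding value on a watched attribute
        # must be rejected by the Attribute Uniqueness plugin.
        entry = {'objectclass': "top extensibleObject".split(),
                 'sn': '2', 'cn': 'user 2', 'uid': 'user2',
                 'userpassword': 'password'}
        entry.update(extra_attrs)
        try:
            topology.standalone.add_s(Entry((USER2_DN, entry)))
        except ldap.CONSTRAINT_VIOLATION:
            return
        log.fatal('Entry with duplicate %s was incorrectly added' % list(extra_attrs))
        assert False

For example, _expect_uniqueness_violation(topology, {'mail': 'user1@example.com'}) covers the duplicate-mail case.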
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_automember_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_automember_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_automember_final(topology): + topology.standalone.delete() + log.info('automember test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_automember_init(topo) + test_automember_(topo) + test_automember_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py new file mode 100644 index 0000000..d2f81ff --- /dev/null +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -0,0 +1,775 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import ldap.sasl +import logging +import pytest +import shutil +from subprocess import check_output +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +# Globals +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX + +ROOTDSE_DEF_ATTR_LIST = ('namingContexts', + 'supportedLDAPVersion', + 'supportedControl', + 'supportedExtension', + 'supportedSASLMechanisms', + 'vendorName', + 'vendorVersion') + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + """This fixture is used to standalone topology for the 'module'.""" + + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
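Editor's note: the TopologyStandalone class and the module-scoped topology fixture above are repeated verbatim in every new suite under dirsrvtests/tests/suites/. They could be hoisted into a shared conftest.py so each suite simply declares a topology argument; the sketch below is a suggestion only, not part of this patch, and assumes the same lib389 constants the suites already import.

    # dirsrvtests/tests/suites/conftest.py (suggested, not in this patch)
    import pytest
    from lib389 import DirSrv
    from lib389._constants import *
    from lib389.properties import *


    class TopologyStandalone(object):
        def __init__(self, standalone):
            self.standalone = standalone


    @pytest.fixture(scope="module")
    def topology(request):
        # Allocate, (re)create and open a standalone instance, and make
        # sure it is deleted again when the module is done with it.
        standalone = DirSrv(verbose=False)
        args_instance[SER_HOST] = HOST_STANDALONE
        args_instance[SER_PORT] = PORT_STANDALONE
        args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
        args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
        standalone.allocate(args_instance.copy())
        if standalone.exists():
            standalone.delete()
        standalone.create()
        standalone.open()
        request.addfinalizer(standalone.delete)
        return TopologyStandalone(standalone)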
+ standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +@pytest.fixture(scope="module") +def import_example_ldif(topology): + """Import the Example LDIF for the tests in this suite""" + + log.info('Initializing the "basic" test suite') + + import_ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix) + try: + topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.error('Online import failed') + assert False + + +@pytest.fixture(params=ROOTDSE_DEF_ATTR_LIST) +def rootdse_attr(topology, request): + """Adds an attr from the list + as the default attr to the rootDSE + """ + + RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr" + rootdse_attr_name = request.param + + log.info(" Add the %s: %s to rootdse" % (RETURN_DEFAULT_OPATTR, + rootdse_attr_name)) + mod = [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] + try: + topology.standalone.modify_s("", mod) + except ldap.LDAPError as e: + log.fatal('Failed to add attr: error (%s)' % (e.message['desc'])) + assert False + + def fin(): + log.info(" Delete the %s: %s from rootdse" % (RETURN_DEFAULT_OPATTR, + rootdse_attr_name)) + mod = [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] + try: + topology.standalone.modify_s("", mod) + except ldap.LDAPError as e: + log.fatal('Failed to delete attr: error (%s)' % (e.message['desc'])) + assert False + request.addfinalizer(fin) + + return rootdse_attr_name + + +def test_basic_ops(topology, import_example_ldif): + """Test doing adds, mods, modrdns, and deletes""" + + log.info('Running test_basic_ops...') + + USER1_NEWDN = 'cn=user1' + USER2_NEWDN = 'cn=user2' + USER3_NEWDN = 'cn=user3' + NEW_SUPERIOR = 'ou=people,' + DEFAULT_SUFFIX + USER1_RDN_DN = 'cn=user1,' + DEFAULT_SUFFIX + USER2_RDN_DN = 'cn=user2,' + DEFAULT_SUFFIX + USER3_RDN_DN = 'cn=user3,' + NEW_SUPERIOR # New superior test + + # + # Adds + # + try: + topology.standalone.add_s(Entry((USER1_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user1', + 'uid': 'user1', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user2', + 'uid': 'user2', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER2_DN + ': error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER3_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '3', + 'cn': 'user3', + 'uid': 'user3', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER3_DN + ': error ' + e.message['desc']) + assert False + + # + # Mods + # + try: + topology.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description', + 'New description')]) + except ldap.LDAPError as e: + log.error('Failed to add description: error ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description', + 'Modified description')]) + except ldap.LDAPError as e: + log.error('Failed to modify description: error ' + e.message['desc']) 
+ assert False + + try: + topology.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description', + None)]) + except ldap.LDAPError as e: + log.error('Failed to delete description: error ' + e.message['desc']) + assert False + + # + # Modrdns + # + try: + topology.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn user1: error ' + e.message['desc']) + assert False + + try: + topology.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0) + except ldap.LDAPError as e: + log.error('Failed to modrdn user2: error ' + e.message['desc']) + assert False + + # Modrdn - New superior + try: + topology.standalone.rename_s(USER3_DN, USER3_NEWDN, + newsuperior=NEW_SUPERIOR, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn(new superior) user3: error ' + e.message['desc']) + assert False + + # + # Deletes + # + try: + topology.standalone.delete_s(USER1_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry1: ' + e.message['desc']) + assert False + + try: + topology.standalone.delete_s(USER2_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry2: ' + e.message['desc']) + assert False + + try: + topology.standalone.delete_s(USER3_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry3: ' + e.message['desc']) + assert False + + log.info('test_basic_ops: PASSED') + + +def test_basic_import_export(topology, import_example_ldif): + """Test online and offline LDIF imports & exports""" + + log.info('Running test_basic_import_export...') + + tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) + + # + # Test online/offline LDIF imports + # + + # Generate a test ldif (50k entries) + import_ldif = tmp_dir + '/basic_import.ldif' + try: + topology.standalone.buildLDIF(50000, import_ldif) + except OSError as e: + log.fatal('test_basic_import_export: failed to create test ldif,\ + error: %s - %s' % (e.errno, e.strerror)) + assert False + + # Online + try: + topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_import_export: Online import failed') + assert False + + # Offline + if not topology.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif): + log.fatal('test_basic_import_export: Offline import failed') + assert False + + # + # Test online and offline LDIF export + # + + # Online export + export_ldif = tmp_dir + 'export.ldif' + exportTask = Tasks(topology.standalone) + try: + args = {TASK_WAIT: True} + exportTask.exportLDIF(DEFAULT_SUFFIX, None, export_ldif, args) + except ValueError: + log.fatal('test_basic_import_export: Online export failed') + assert False + + # Offline export + if not topology.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,), + None, None, None, export_ldif): + log.fatal('test_basic_import_export: Failed to run offline db2ldif') + assert False + + # + # Cleanup - Import the Example LDIF for the other tests in this suite + # + import_ldif = '%s/Example.ldif' % get_data_dir(topology.standalone.prefix) + try: + topology.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_import_export: Online import failed') + assert False + + log.info('test_basic_import_export: PASSED') + + +def test_basic_backup(topology, import_example_ldif): + """Test online and offline back and restore""" + + log.info('Running test_basic_backup...') + + 
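Editor's note: test_basic_import_export above only verifies that the import and export tasks complete without error. A small follow-up sanity check, reusing the search API already used in this file, could confirm that entries actually landed in the backend; the exact count depends on the LDIF that buildLDIF generated, so only a lower bound is asserted. This is a sketch, not part of the patch.

    entries = topology.standalone.search_s(DEFAULT_SUFFIX,
                                           ldap.SCOPE_SUBTREE,
                                           '(uid=*)')
    assert len(entries) > 0, 'Import completed but no entries were found'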
backup_dir = '%sbasic_backup/' % topology.standalone.getDir(__file__, TMP_DIR) + + # Test online backup + try: + topology.standalone.tasks.db2bak(backup_dir=backup_dir, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_backup: Online backup failed') + assert False + + # Test online restore + try: + topology.standalone.tasks.bak2db(backup_dir=backup_dir, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_backup: Online restore failed') + assert False + + # Test offline backup + if not topology.standalone.db2bak(backup_dir): + log.fatal('test_basic_backup: Offline backup failed') + assert False + + # Test offline restore + if not topology.standalone.bak2db(backup_dir): + log.fatal('test_basic_backup: Offline backup failed') + assert False + + log.info('test_basic_backup: PASSED') + + +def test_basic_acl(topology, import_example_ldif): + """Run some basic access control(ACL) tests""" + + log.info('Running test_basic_acl...') + + DENY_ACI = ('(targetattr = "*") (version 3.0;acl "deny user";deny (all)' + + '(userdn = "ldap:///' + USER1_DN + '");)') + + # + # Add two users + # + try: + topology.standalone.add_s(Entry((USER1_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + + ': error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + + ': error ' + e.message['desc']) + assert False + + # + # Add an aci that denies USER1 from doing anything, + # and also set the default anonymous access + # + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)]) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.message['desc']) + assert False + + # + # Make sure USER1_DN can not search anything, but USER2_dn can... + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.message['desc']) + assert False + + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=*)') + if entries: + log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search suffix failed(as user1): ' + e.message['desc']) + assert False + + # Now try user2... 
Also check that userpassword is stripped out + try: + topology.standalone.simple_bind_s(USER2_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.message['desc']) + assert False + + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=user1)') + if not entries: + log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix') + assert False + if entries[0].hasAttr('userpassword'): + # The default anonymous access aci should have stripped out userpassword + log.fatal('test_basic_acl: User2 was incorrectly able to see userpassword') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.message['desc']) + assert False + + # Make sure Root DN can also search (this also resets the bind dn to the + # Root DN for future operations) + try: + topology.standalone.simple_bind_s(DN_DM, PW_DM) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as ROotDN, error: ' + e.message['desc']) + assert False + + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=*)') + if not entries: + log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.message['desc']) + assert False + + # + # Cleanup + # + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)]) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.message['desc']) + assert False + + try: + topology.standalone.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.message['desc']) + assert False + + try: + topology.standalone.delete_s(USER2_DN) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete test entry2: ' + e.message['desc']) + assert False + + log.info('test_basic_acl: PASSED') + + +def test_basic_searches(topology, import_example_ldif): + """The search results are gathered from testing with Example.ldif""" + + log.info('Running test_basic_searches...') + + filters = (('(uid=scarter)', 1), + ('(uid=tmorris*)', 1), + ('(uid=*hunt*)', 4), + ('(uid=*cope)', 2), + ('(mail=*)', 150), + ('(roomnumber>=4000)', 35), + ('(roomnumber<=4000)', 115), + ('(&(roomnumber>=4000)(roomnumber<=4500))', 18), + ('(!(l=sunnyvale))', 120), + ('(&(uid=t*)(l=santa clara))', 7), + ('(|(uid=k*)(uid=r*))', 18), + ('(|(uid=t*)(l=sunnyvale))', 50), + ('(&(!(uid=r*))(ou=people))', 139), + ('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3), + ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5), + ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4)) + + for (search_filter, search_result) in filters: + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_filter) + if len(entries) != search_result: + log.fatal('test_basic_searches: An incorrect number of entries\ + was returned from filter (%s): (%d) expected (%d)' % + (search_filter, len(entries), search_result)) + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + log.info('test_basic_searches: PASSED') + + +def test_basic_referrals(topology, import_example_ldif): + """Set the server to referral mode, + and make sure we recive the referal error(10) + """ + + 
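Editor's note: the filter/expected-count pairs in test_basic_searches above could equally be written with pytest.mark.parametrize, which reports every filter as its own test case instead of stopping at the first mismatch. A minimal sketch with two of the filters, assuming the same topology and import_example_ldif fixtures; this is an alternative, not part of the patch.

    @pytest.mark.parametrize("search_filter,expected", [('(uid=scarter)', 1),
                                                        ('(mail=*)', 150)])
    def test_basic_search_filters(topology, import_example_ldif,
                                  search_filter, expected):
        # Each filter is run against the imported Example.ldif and must
        # return exactly the expected number of entries.
        entries = topology.standalone.search_s(DEFAULT_SUFFIX,
                                               ldap.SCOPE_SUBTREE,
                                               search_filter)
        assert len(entries) == expected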
log.info('Running test_basic_referrals...') + + SUFFIX_CONFIG = 'cn="dc=example,dc=com",cn=mapping tree,cn=config' + + # + # Set the referral, adn the backend state + # + try: + topology.standalone.modify_s(SUFFIX_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-referral', + 'ldap://localhost.localdomain:389/o%3dnetscaperoot')]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to set referral: error ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, + 'nsslapd-state', 'Referral')]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to set backend state: error ' + + e.message['desc']) + assert False + + # + # Test that a referral error is returned + # + topology.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral + try: + topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') + except ldap.REFERRAL: + pass + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Search failed: ' + e.message['desc']) + assert False + + # + # Make sure server can restart in referral mode + # + topology.standalone.restart(timeout=10) + + # + # Cleanup + # + try: + topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, + 'nsslapd-state', 'Backend')]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to set backend state: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE, + 'nsslapd-referral', None)]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to delete referral: error ' + + e.message['desc']) + assert False + topology.standalone.set_option(ldap.OPT_REFERRALS, 1) + + log.info('test_basic_referrals: PASSED') + + +def test_basic_systemctl(topology, import_example_ldif): + """Test systemctl can stop and start the server. Also test that start reports an + error when the instance does not start. 
Only for RPM builds + """ + + log.info('Running test_basic_systemctl...') + + # We can only use systemctl on RPM installations + if topology.standalone.prefix and topology.standalone.prefix != '/': + return + + data_dir = topology.standalone.getDir(__file__, DATA_DIR) + tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) + config_dir = topology.standalone.confdir + start_ds = 'sudo systemctl start dirsrv@' + topology.standalone.serverid + '.service' + stop_ds = 'sudo systemctl stop dirsrv@' + topology.standalone.serverid + '.service' + is_running = 'sudo systemctl is-active dirsrv@' + topology.standalone.serverid + '.service' + + # + # Stop the server + # + log.info('Stopping the server...') + rc = os.system(stop_ds) + log.info('Check the status...') + if rc != 0 or os.system(is_running) == 0: + log.fatal('test_basic_systemctl: Failed to stop the server') + assert False + log.info('Stopped the server.') + + # + # Start the server + # + log.info('Starting the server...') + rc = os.system(start_ds) + log.info('Check the status...') + if rc != 0 or os.system(is_running) != 0: + log.fatal('test_basic_systemctl: Failed to start the server') + assert False + log.info('Started the server.') + + # + # Stop the server, break the dse.ldif so a start fails, + # and verify that systemctl detects the failed start + # + log.info('Stopping the server...') + rc = os.system(stop_ds) + log.info('Check the status...') + if rc != 0 or os.system(is_running) == 0: + log.fatal('test_basic_systemctl: Failed to stop the server') + assert False + log.info('Stopped the server before breaking the dse.ldif.') + + shutil.copy(config_dir + '/dse.ldif', tmp_dir) + shutil.copy(data_dir + 'basic/dse.ldif.broken', config_dir + '/dse.ldif') + + log.info('Attempting to start the server with broken dse.ldif...') + rc = os.system(start_ds) + log.info('Check the status...') + if rc == 0 or os.system(is_running) == 0: + log.fatal('test_basic_systemctl: The server incorrectly started') + assert False + log.info('Server failed to start as expected') + time.sleep(5) + + # + # Fix the dse.ldif, and make sure the server starts up, + # and systemctl correctly identifies the successful start + # + shutil.copy(tmp_dir + 'dse.ldif', config_dir) + log.info('Starting the server with good dse.ldif...') + rc = os.system(start_ds) + time.sleep(5) + log.info('Check the status...') + if rc != 0 or os.system(is_running) != 0: + log.fatal('test_basic_systemctl: Failed to start the server') + assert False + log.info('Server started after fixing dse.ldif.') + time.sleep(1) + + log.info('test_basic_systemctl: PASSED') + + +def test_basic_ldapagent(topology, import_example_ldif): + """Test that the ldap agent starts""" + + log.info('Running test_basic_ldapagent...') + + tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) + var_dir = topology.standalone.prefix + '/var' + config_file = tmp_dir + '/agent.conf' + cmd = 'sudo %s/ldap-agent %s' % (get_sbin_dir(prefix=topology.standalone.prefix), + config_file) + + agent_config_file = open(config_file, 'w') + agent_config_file.write('agentx-master ' + var_dir + '/agentx/master\n') + agent_config_file.write('agent-logdir ' + var_dir + '/log/dirsrv\n') + agent_config_file.write('server slapd-' + topology.standalone.serverid + '\n') + agent_config_file.close() + + rc = os.system(cmd) + if rc != 0: + log.fatal('test_basic_ldapagent: Failed to start snmp ldap agent: error %d' % rc) + assert False + + log.info('snmp ldap agent started') + + # + # Cleanup - kill the agent + # + pid = check_output(['pidof', 
'-s', 'ldap-agent-bin']) + log.info('Cleanup - killing agent: ' + pid) + rc = os.system('sudo kill -9 ' + pid) + + log.info('test_basic_ldapagent: PASSED') + + +def test_basic_dse(topology, import_example_ldif): + """Test that the dse.ldif is not wipped out + after the process is killed (bug 910581) + """ + + log.info('Running test_basic_dse...') + + dse_file = topology.standalone.confdir + '/dse.ldif' + pid = check_output(['pidof', '-s', 'ns-slapd']) + os.system('sudo kill -9 ' + pid) + if os.path.getsize(dse_file) == 0: + log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!') + assert False + + topology.standalone.start(timeout=10) + log.info('dse.ldif was not corrupted, and the server was restarted') + + log.info('test_basic_dse: PASSED') + + +@pytest.mark.parametrize("rootdse_attr_name", ROOTDSE_DEF_ATTR_LIST) +def test_def_rootdse_attr(topology, import_example_ldif, rootdse_attr_name): + """Tests that operational attributes + are not returned by default in rootDSE searches + """ + + log.info(" Assert rootdse search hasn't %s attr" % rootdse_attr_name) + try: + entries = topology.standalone.search_s("", ldap.SCOPE_BASE) + entry = str(entries[0]) + assert rootdse_attr_name not in entry + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +def test_mod_def_rootdse_attr(topology, import_example_ldif, rootdse_attr): + """Tests that operational attributes are returned + by default in rootDSE searches after config modification + """ + + log.info(" Assert rootdse search has %s attr" % rootdse_attr) + try: + entries = topology.standalone.search_s("", ldap.SCOPE_BASE) + entry = str(entries[0]) + assert rootdse_attr in entry + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py new file mode 100644 index 0000000..7bef791 --- /dev/null +++ b/dirsrvtests/tests/suites/betxns/betxn_test.py @@ -0,0 +1,258 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import six +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
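Editor's note: test_def_rootdse_attr and test_mod_def_rootdse_attr above assert against str(entries[0]), so an attribute name that happens to appear inside some value would also match. An equivalent check with the lib389 Entry API already used elsewhere in this suite (hasAttr, as in test_basic_acl) avoids that; a minimal sketch, assuming hasAttr matches the attribute type the way the other tests rely on.

    entries = topology.standalone.search_s("", ldap.SCOPE_BASE)
    assert not entries[0].hasAttr(rootdse_attr_name)

After the rootdse_attr fixture has added the nsslapd-return-default-opattr value, test_mod_def_rootdse_attr would use the positive form, assert entries[0].hasAttr(rootdse_attr).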
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_betxn_init(topology): + # First enable dynamic plugins - makes plugin testing much easier + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugin!' + e.message['desc']) + assert False + + +def test_betxt_7bit(topology): + ''' + Test that the 7-bit plugin correctly rejects an invlaid update + ''' + + log.info('Running test_betxt_7bit...') + + USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX + eight_bit_rdn = six.u('uid=Fu\u00c4\u00e8') + BAD_RDN = eight_bit_rdn.encode('utf-8') + + # This plugin should on by default, but just in case... + topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) + + # Add our test user + try: + topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'test 1', + 'uid': 'test_entry', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER_DN + ': error ' + e.message['desc']) + assert False + + # Attempt a modrdn, this should fail + try: + topology.standalone.rename_s(USER_DN, BAD_RDN, delold=0) + log.fatal('test_betxt_7bit: Modrdn operation incorrectly succeeded') + assert False + except ldap.LDAPError as e: + log.info('Modrdn failed as expected: error ' + e.message['desc']) + + # Make sure the operation did not succeed, attempt to search for the new RDN + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, BAD_RDN) + if entries: + log.fatal('test_betxt_7bit: Incorrectly found the entry using the invalid RDN') + assert False + except ldap.LDAPError as e: + log.fatal('Error whiles earching for test entry: ' + e.message['desc']) + assert False + + # + # Cleanup - remove the user + # + try: + topology.standalone.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('Failed to delete test entry: ' + e.message['desc']) + assert False + + log.info('test_betxt_7bit: PASSED') + + +def test_betxn_attr_uniqueness(topology): + ''' + Test that we can not add two entries that have the same attr value that is + defined by the plugin. 
+ ''' + + log.info('Running test_betxn_attr_uniqueness...') + + USER1_DN = 'uid=test_entry1,' + DEFAULT_SUFFIX + USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX + + topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + + # Add the first entry + try: + topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'test 1', + 'uid': 'test_entry1', + 'userpassword': 'password1'}))) + except ldap.LDAPError as e: + log.fatal('test_betxn_attr_uniqueness: Failed to add test user: ' + + USER1_DN + ', error ' + e.message['desc']) + assert False + + # Add the second entry with a dupliate uid + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'test 2', + 'uid': 'test_entry2', + 'uid': 'test_entry1', # Duplicate value + 'userpassword': 'password2'}))) + log.fatal('test_betxn_attr_uniqueness: The second entry was incorrectly added.') + assert False + except ldap.LDAPError as e: + log.error('test_betxn_attr_uniqueness: Failed to add test user as expected: ' + + USER1_DN + ', error ' + e.message['desc']) + + # + # Cleanup - disable plugin, remove test entry + # + topology.standalone.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS) + + try: + topology.standalone.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_betxn_attr_uniqueness: Failed to delete test entry1: ' + + e.message['desc']) + assert False + + log.info('test_betxn_attr_uniqueness: PASSED') + + +def test_betxn_memberof(topology): + ENTRY1_DN = 'cn=group1,' + DEFAULT_SUFFIX + ENTRY2_DN = 'cn=group2,' + DEFAULT_SUFFIX + PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' + + # Enable and configure memberOf plugin + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')]) + except ldap.LDAPError as e: + log.fatal('test_betxn_memberof: Failed to update config(member): error ' + e.message['desc']) + assert False + + # Add our test entries + try: + topology.standalone.add_s(Entry((ENTRY1_DN, {'objectclass': "top groupofnames".split(), + 'cn': 'group1'}))) + except ldap.LDAPError as e: + log.error('test_betxn_memberof: Failed to add group1:' + + ENTRY1_DN + ', error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((ENTRY2_DN, {'objectclass': "top groupofnames".split(), + 'cn': 'group1'}))) + except ldap.LDAPError as e: + log.error('test_betxn_memberof: Failed to add group2:' + + ENTRY2_DN + ', error ' + e.message['desc']) + assert False + + # + # Test mod replace + # + + # Add group2 to group1 - it should fail with objectclass violation + try: + topology.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_REPLACE, 'member', ENTRY2_DN)]) + log.fatal('test_betxn_memberof: Group2 was incorrectly allowed to be added to group1') + assert False + except ldap.LDAPError as e: + log.info('test_betxn_memberof: Group2 was correctly rejected (mod replace): error ' + e.message['desc']) + + # + # Test mod add + # + + # Add group2 to group1 - it should fail with objectclass violation + try: + topology.standalone.modify_s(ENTRY1_DN, [(ldap.MOD_ADD, 'member', ENTRY2_DN)]) + log.fatal('test_betxn_memberof: Group2 was incorrectly allowed to be added to group1') + assert False + except ldap.LDAPError as e: + log.info('test_betxn_memberof: Group2 was correctly rejected (mod add): error ' + e.message['desc']) + + # + # Done + # + + log.info('test_betxn_memberof: PASSED') + + +def 
test_betxn_final(topology): + topology.standalone.delete() + log.info('betxn test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_betxn_init(topo) + test_betxt_7bit(topo) + test_betxn_attr_uniqueness(topo) + test_betxn_memberof(topo) + test_betxn_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py b/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py new file mode 100644 index 0000000..50eed9a --- /dev/null +++ b/dirsrvtests/tests/suites/chaining_plugin/chaining_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_chaining_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_chaining_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_chaining_final(topology): + topology.standalone.delete() + log.info('chaining test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_chaining_init(topo) + test_chaining_(topo) + test_chaining_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py new file mode 100644 index 0000000..4f2804f --- /dev/null +++ b/dirsrvtests/tests/suites/clu/clu_test.py @@ -0,0 +1,115 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_clu_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_clu_pwdhash(topology): + ''' + Test the pwdhash script + ''' + + log.info('Running test_clu_pwdhash...') + + cmd = 'pwdhash -s ssha testpassword' + + p = os.popen(cmd) + result = p.readline() + p.close() + + if not result: + log.fatal('test_clu_pwdhash: Failed to run pwdhash') + assert False + + if len(result) < 20: + log.fatal('test_clu_pwdhash: Encrypted password is too short') + assert False + + log.info('pwdhash generated: ' + result) + log.info('test_clu_pwdhash: PASSED') + + +def test_clu_final(topology): + topology.standalone.delete() + log.info('clu test suite PASSED') + + +def run_isolated(): + ''' + This test is for the simple scripts that don't have a lot of options or + points of failure. Scripts that do, should have their own individual tests. + ''' + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_clu_init(topo) + + test_clu_pwdhash(topo) + + test_clu_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/clu/db2ldif_test.py b/dirsrvtests/tests/suites/clu/db2ldif_test.py new file mode 100644 index 0000000..dbfb0d0 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/db2ldif_test.py @@ -0,0 +1,92 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_db2ldif_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_db2ldif_final(topology): + topology.standalone.delete() + log.info('db2ldif test suite PASSED') + + +def run_isolated(): + ''' + Test db2lidf/db2ldif.pl - test/stress functionality, all the command line options, + valid/invalid option combinations, etc, etc. + ''' + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_db2ldif_init(topo) + + # test 1 function... + # test 2 function... + # ... + + test_db2ldif_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/collation_plugin/collatation_test.py b/dirsrvtests/tests/suites/collation_plugin/collatation_test.py new file mode 100644 index 0000000..1a918c6 --- /dev/null +++ b/dirsrvtests/tests/suites/collation_plugin/collatation_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_collatation_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_collatation_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_collatation_final(topology): + topology.standalone.delete() + log.info('collatation test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_collatation_init(topo) + test_collatation_(topo) + test_collatation_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py new file mode 100644 index 0000000..d3631e3 --- /dev/null +++ b/dirsrvtests/tests/suites/config/config_test.py @@ -0,0 +1,198 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_config_init(topology): + ''' + Initialization function + ''' + return + + +def test_config_listen_backport_size(topology): + ''' + We need to check that we can search on nsslapd-listen-backlog-size, + and change its value: to a psoitive number and a negative number. + Verify invalid value is rejected. 
+ ''' + + log.info('Running test_config_listen_backport_size...') + + try: + entry = topology.standalone.search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top', + ['nsslapd-listen-backlog-size']) + default_val = entry[0].getValue('nsslapd-listen-backlog-size') + if not default_val: + log.fatal('test_config_listen_backport_size: Failed to get nsslapd-listen-backlog-size from config') + assert False + except ldap.LDAPError as e: + log.fatal('test_config_listen_backport_size: Failed to search config, error: ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '256')]) + except ldap.LDAPError as e: + log.fatal('test_config_listen_backport_size: Failed to modify config, error: ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '-1')]) + except ldap.LDAPError as e: + log.fatal('test_config_listen_backport_size: Failed to modify config(negative value), error: ' + + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', 'ZZ')]) + log.fatal('test_config_listen_backport_size: Invalid value was successfully added') + assert False + except ldap.LDAPError as e: + pass + + # + # Cleanup - undo what we've done + # + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', default_val)]) + except ldap.LDAPError as e: + log.fatal('test_config_listen_backport_size: Failed to reset config, error: ' + e.message['desc']) + assert False + + log.info('test_config_listen_backport_size: PASSED') + + +def test_config_deadlock_policy(topology): + ''' + We need to check that nsslapd-db-deadlock-policy exists, that we can + change the value, and invalid values are rejected + ''' + + log.info('Running test_config_deadlock_policy...') + + LDBM_DN = 'cn=config,cn=ldbm database,cn=plugins,cn=config' + default_val = '9' + + try: + entry = topology.standalone.search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top', + ['nsslapd-db-deadlock-policy']) + val = entry[0].getValue('nsslapd-db-deadlock-policy') + if not val: + log.fatal('test_config_deadlock_policy: Failed to get nsslapd-db-deadlock-policy from config') + assert False + if val != default_val: + log.fatal('test_config_deadlock_policy: The wrong default value was present: (%s) but expected (%s)' % + (val, default_val)) + assert False + except ldap.LDAPError as e: + log.fatal('test_config_deadlock_policy: Failed to search config, error: ' + e.message['desc']) + assert False + + # Try a range of valid values + for val in ('0', '5', '9'): + try: + topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)]) + except ldap.LDAPError as e: + log.fatal('test_config_deadlock_policy: Failed to modify config: nsslapd-db-deadlock-policy to (%s), error: %s' % + (val, e.message['desc'])) + assert False + + # Try a range of invalid values + for val in ('-1', '10'): + try: + topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)]) + log.fatal('test_config_deadlock_policy: Able to add invalid value to nsslapd-db-deadlock-policy(%s)' % (val)) + assert False + except ldap.LDAPError as e: + pass + # + # Cleanup - undo what we've done + # + try: + topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', default_val)]) + except ldap.LDAPError as e: + log.fatal('test_config_deadlock_policy: Failed
to reset nsslapd-db-deadlock-policy to the default value(%s), error: %s' % + (default_val, e.message['desc'])) + + log.info('test_config_deadlock_policy: PASSED') + + +def test_config_final(topology): + topology.standalone.delete() + log.info('config test suite PASSED') + + +def run_isolated(): + ''' + This test suite is designed to test all things cn=config, like the core cn=config settings, + the ldbm database settings, etc. This suite should not test individual plugins - there + should be individual suites for each plugin. + ''' + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_config_init(topo) + + test_config_listen_backport_size(topo) + test_config_deadlock_policy(topo) + + test_config_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/cos_plugin/cos_test.py b/dirsrvtests/tests/suites/cos_plugin/cos_test.py new file mode 100644 index 0000000..2dc3ac9 --- /dev/null +++ b/dirsrvtests/tests/suites/cos_plugin/cos_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_cos_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_cos_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_cos_final(topology): + topology.standalone.delete() + log.info('cos test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_cos_init(topo) + test_cos_(topo) + test_cos_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/deref_plugin/deref_test.py b/dirsrvtests/tests/suites/deref_plugin/deref_test.py new file mode 100644 index 0000000..9beaa38 --- /dev/null +++ b/dirsrvtests/tests/suites/deref_plugin/deref_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_deref_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_deref_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_deref_final(topology): + topology.standalone.delete() + log.info('deref test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_deref_init(topo) + test_deref_(topo) + test_deref_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py new file mode 100644 index 0000000..0b84c54 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitor_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_disk_monitor_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_disk_monitor_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_disk_monitor_final(topology): + topology.standalone.delete() + log.info('disk_monitor test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_disk_monitor_init(topo) + test_disk_monitor_(topo) + test_disk_monitor_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py b/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py new file mode 100644 index 0000000..ab1cf87 --- /dev/null +++ b/dirsrvtests/tests/suites/distrib_plugin/distrib_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_distrib_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_distrib_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_distrib_final(topology): + topology.standalone.delete() + log.info('distrib test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_distrib_init(topo) + test_distrib_(topo) + test_distrib_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/dna_plugin/dna_test.py b/dirsrvtests/tests/suites/dna_plugin/dna_test.py new file mode 100644 index 0000000..6b0ab8b --- /dev/null +++ b/dirsrvtests/tests/suites/dna_plugin/dna_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_dna_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_dna_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_dna_final(topology): + topology.standalone.delete() + log.info('dna test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_dna_init(topo) + test_dna_(topo) + test_dna_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py new file mode 100644 index 0000000..9d870d4 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ds_logs_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_ds_logs_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_ds_logs_final(topology): + topology.standalone.delete() + log.info('ds_logs test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ds_logs_init(topo) + test_ds_logs_(topo) + test_ds_logs_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py new file mode 100644 index 0000000..30dfa88 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py @@ -0,0 +1,2406 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 09, 2014 + +@author: mreynolds +''' +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX +BUSER1_DN = 'uid=user1,ou=branch1,' + DEFAULT_SUFFIX +BUSER2_DN = 'uid=user2,ou=branch2,' + DEFAULT_SUFFIX +BUSER3_DN = 'uid=user3,ou=branch2,' + DEFAULT_SUFFIX +BRANCH1_DN = 'ou=branch1,' + DEFAULT_SUFFIX +BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX +GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX +PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX +CONFIG_AREA = 'nsslapd-pluginConfigArea' + +''' + Functional tests for each plugin + + Test: + plugin restarts (test when on and off) + plugin config validation + plugin dependencies + plugin functionality (including plugin tasks) +''' + + +################################################################################ +# +# Test Plugin Dependency +# +################################################################################ +def test_dependency(inst, plugin): + """ + Set the "account usabilty" plugin to depend on this plugin. This plugin + is generic, always enabled, and perfect for our testing + """ + + try: + inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'nsslapd-plugin-depends-on-named', plugin)]) + + except ldap.LDAPError as e: + log.fatal('test_dependency: Failed to modify ' + PLUGIN_ACCT_USABILITY + ': error ' + e.message['desc']) + assert False + + try: + inst.modify_s('cn=' + plugin + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'off')]) + + except ldap.UNWILLING_TO_PERFORM: + # failed as expected + pass + else: + # Incorrectly succeeded + log.fatal('test_dependency: Plugin dependency check failed (%s)' % plugin) + assert False + + # Now undo the change + try: + inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config', + [(ldap.MOD_DELETE, 'nsslapd-plugin-depends-on-named', None)]) + except ldap.LDAPError as e: + log.fatal('test_dependency: Failed to reset ' + plugin + ': error ' + e.message['desc']) + assert False + + +################################################################################ +# +# Wait for task to complete +# +################################################################################ +def wait_for_task(conn, task_dn): + finished = False + count = 0 + while count < 60: + try: + task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*') + if not task_entry: + log.fatal('wait_for_task: Search failed to find task: ' + task_dn) + assert False + if task_entry[0].hasAttr('nstaskexitcode'): + # task is done + finished = True + break + except ldap.LDAPError as e: + log.fatal('wait_for_task: Search failed: ' + e.message['desc']) + assert False + + time.sleep(1) + count += 1 + if not finished: + log.fatal('wait_for_task: Task (%s) did not complete!' 
% task_dn) + assert False + + +################################################################################ +# +# Test Account Policy Plugin (0) +# +################################################################################ +def test_acctpolicy(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_ACCT_POLICY) + inst.plugins.enable(name=PLUGIN_ACCT_POLICY) + + if args == "restart": + return True + + CONFIG_DN = 'cn=config,cn=Account Policy Plugin,cn=plugins,cn=config' + + log.info('Testing ' + PLUGIN_ACCT_POLICY + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add the config entry + try: + inst.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'alwaysrecordlogin': 'yes', + 'stateattrname': 'lastLoginTime' + }))) + except ldap.ALREADY_EXISTS: + try: + inst.modify_s(CONFIG_DN, + [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), + (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')]) + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc']) + assert False + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to add config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add an entry + time.sleep(1) + try: + inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + # bind as user + try: + inst.simple_bind_s(USER1_DN, "password") + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to bind as user1: ' + e.message['desc']) + assert False + + # Bind as Root DN + time.sleep(1) + try: + inst.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc']) + assert False + + # Check lastLoginTime of USER1 + try: + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'lastLoginTime=*') + if not entries: + log.fatal('test_acctpolicy: Search failed to find an entry with lastLoginTime.') + assert False + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Search failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Change config - change the stateAttrName to a new attribute + ############################################################################ + + try: + inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'testLastLoginTime')]) + + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + time.sleep(1) + # login as user + try: + inst.simple_bind_s(USER1_DN, "password") + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc']) + assert False + + time.sleep(1) + # Bind as Root DN 
+ try: + inst.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc']) + assert False + + # Check testLastLoginTime was added to USER1 + try: + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(testLastLoginTime=*)') + if not entries: + log.fatal('test_acctpolicy: Search failed to find an entry with testLastLoginTime.') + assert False + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Search failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_ACCT_POLICY) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_acctpolicy: Failed to delete test entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_acctpolicy: PASS\n') + + return + + +################################################################################ +# +# Test Attribute Uniqueness Plugin (1) +# +################################################################################ +def test_attruniq(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS) + inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + + if args == "restart": + return + + log.info('Testing ' + PLUGIN_ATTR_UNIQUENESS + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + try: + inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')]) + + except ldap.LDAPError as e: + log.fatal('test_attruniq: Failed to configure plugin for "uid": error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add an entry + try: + inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'mail': 'user1@example.com', + 'mailAlternateAddress' : 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + # Add an entry with a duplicate "uid" + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'uid': 'user1', + 'userpassword': 'password'}))) + + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.fatal('test_attruniq: Adding of 2nd entry(uid) incorrectly succeeded') + assert False + + ############################################################################ + # Change config to use "mail" instead of "uid" + ############################################################################ + + try: + inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + 
[(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')]) + + except ldap.LDAPError as e: + log.fatal('test_attruniq: Failed to configure plugin for "mail": error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mail" value + ############################################################################ + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.fatal('test_attruniq: Adding of 2nd entry(mail) incorrectly succeeded') + assert False + + ############################################################################ + # Reconfigure plugin for mail and mailAlternateAddress + ############################################################################ + + try: + inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'), + (ldap.MOD_ADD, 'uniqueness-attribute-name', + 'mailAlternateAddress')]) + + except ldap.LDAPError as e: + log.error('test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mail" value + ############################################################################ + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attruniq: Adding of 3rd entry(mail) incorrectly succeeded') + assert False + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value + ############################################################################ + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attruniq: Adding of 4th entry(mailAlternateAddress) incorrectly succeeded') + assert False + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress + ############################################################################ + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attruniq: Adding of 5th entry(mailAlternateAddress) incorrectly succeeded') + assert False + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail + ############################################################################ + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 
2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_attruniq: Adding of 6th entry(mail) incorrectly succeeded') + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_ATTR_UNIQUENESS) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_attruniq: Failed to delete test entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_attruniq: PASS\n') + return + + +################################################################################ +# +# Test Auto Membership Plugin (2) +# +################################################################################ +def test_automember(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_AUTOMEMBER) + inst.plugins.enable(name=PLUGIN_AUTOMEMBER) + + if args == "restart": + return + + CONFIG_DN = 'cn=config,cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config' + + log.info('Testing ' + PLUGIN_AUTOMEMBER + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add the automember group + try: + inst.add_s(Entry((GROUP_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'group' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to add group: error ' + e.message['desc']) + assert False + + # Add ou=branch1 + try: + inst.add_s(Entry((BRANCH1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'branch1' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to add branch1: error ' + e.message['desc']) + assert False + + # Add ou=branch2 + try: + inst.add_s(Entry((BRANCH2_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'branch2' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to add branch2: error ' + e.message['desc']) + assert False + + # Add the automember config entry + try: + inst.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top autoMemberDefinition'.split(), + 'cn': 'config', + 'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=top', + 'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX, + 'autoMemberGroupingAttr': 'member:dn' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to add config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test the plugin + ############################################################################ + + # Add a user that should get added to the group + try: + inst.add_s(Entry((BUSER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to add user: error ' + e.message['desc']) + assert False + + # Check the group + try: + entries = 
inst.search_s(GROUP_DN, ldap.SCOPE_BASE, + '(member=' + BUSER1_DN + ')') + if not entries: + log.fatal('test_automember: Search failed to find member user1') + assert False + except ldap.LDAPError as e: + log.fatal('test_automember: Search failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Change config + ############################################################################ + + try: + inst.modify_s(CONFIG_DN, + [(ldap.MOD_REPLACE, 'autoMemberGroupingAttr', 'uniquemember:dn'), + (ldap.MOD_REPLACE, 'autoMemberScope', 'ou=branch2,' + DEFAULT_SUFFIX)]) + + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to modify config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add a user that should get added to the group + try: + inst.add_s(Entry((BUSER2_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to user to branch2: error ' + e.message['desc']) + assert False + + # Check the group + try: + entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, + '(uniquemember=' + BUSER2_DN + ')') + if not entries: + log.fatal('test_automember: Search failed to find uniquemember user2') + assert False + except ldap.LDAPError as e: + log.fatal('test_automember: Search failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test Task + ############################################################################ + + # Disable plugin + inst.plugins.disable(name=PLUGIN_AUTOMEMBER) + + # Add an entry that should be picked up by automember - verify it is not(yet) + try: + inst.add_s(Entry((BUSER3_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user3' + }))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to user3 to branch2: error ' + e.message['desc']) + assert False + + # Check the group - uniquemember should not exist + try: + entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, + '(uniquemember=' + BUSER3_DN + ')') + if entries: + log.fatal('test_automember: user3 was incorrectly added to the group') + assert False + except ldap.LDAPError as e: + log.fatal('test_automember: Search failed: ' + e.message['desc']) + assert False + + # Enable plugin + inst.plugins.enable(name=PLUGIN_AUTOMEMBER) + + TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config' + # Add the task + try: + inst.add_s(Entry((TASK_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'basedn': 'ou=branch2,' + DEFAULT_SUFFIX, + 'filter': 'objectclass=top'}))) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to add task: error ' + e.message['desc']) + assert False + + wait_for_task(inst, TASK_DN) + + # Verify the fixup task worked + try: + entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, + '(uniquemember=' + BUSER3_DN + ')') + if not entries: + log.fatal('test_automember: user3 was not added to the group') + assert False + except ldap.LDAPError as e: + log.fatal('test_automember: Search failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + 
############################################################################ + + test_dependency(inst, PLUGIN_AUTOMEMBER) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(BUSER1_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete test entry1: ' + e.message['desc']) + assert False + + try: + inst.delete_s(BUSER2_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete test entry2: ' + e.message['desc']) + assert False + + try: + inst.delete_s(BUSER3_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete test entry3: ' + e.message['desc']) + assert False + + try: + inst.delete_s(BRANCH1_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete branch1: ' + e.message['desc']) + assert False + + try: + inst.delete_s(BRANCH2_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete test branch2: ' + e.message['desc']) + assert False + + try: + inst.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete test group: ' + e.message['desc']) + assert False + + try: + inst.delete_s(CONFIG_DN) + except ldap.LDAPError as e: + log.fatal('test_automember: Failed to delete plugin config entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_automember: PASS\n') + return + + +################################################################################ +# +# Test DNA Plugin (3) +# +################################################################################ +def test_dna(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_DNA) + inst.plugins.enable(name=PLUGIN_DNA) + + if args == "restart": + return + + CONFIG_DN = 'cn=config,cn=' + PLUGIN_DNA + ',cn=plugins,cn=config' + + log.info('Testing ' + PLUGIN_DNA + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + try: + inst.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top dnaPluginConfig'.split(), + 'cn': 'config', + 'dnatype': 'uidNumber', + 'dnafilter': '(objectclass=top)', + 'dnascope': DEFAULT_SUFFIX, + 'dnaMagicRegen': '-1', + 'dnaMaxValue': '50000', + 'dnaNextValue': '1' + }))) + except ldap.ALREADY_EXISTS: + try: + inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'), + (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc']) + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to add config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to user1: error ' + e.message['desc']) + assert False + + # See if the entry now has the new uidNumber assignment - uidNumber=1 + try: + entries = inst.search_s(USER1_DN, 
ldap.SCOPE_BASE, '(uidNumber=1)') + if not entries: + log.fatal('test_dna: user1 was not updated - (looking for uidNumber: 1)') + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) + assert False + + # Test the magic regen value + try: + inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the magic regen value: error ' + e.message['desc']) + assert False + + # See if the entry now has the new uidNumber assignment - uidNumber=2 + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)') + if not entries: + log.fatal('test_dna: user1 was not updated (looking for uidNumber: 2)') + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) + assert False + + ################################################################################ + # Change the config + ################################################################################ + + try: + inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the magic regen value to -2: error ' + e.message['desc']) + assert False + + ################################################################################ + # Test plugin + ################################################################################ + + # Test the magic regen value + try: + inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')]) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to set the magic regen value: error ' + e.message['desc']) + assert False + + # See if the entry now has the new uidNumber assignment - uidNumber=3 + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)') + if not entries: + log.fatal('test_dna: user1 was not updated (looking for uidNumber: 3)') + assert False + except ldap.LDAPError as e: + log.fatal('test_dna: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_DNA) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_dna: Failed to delete test entry1: ' + e.message['desc']) + assert False + + inst.plugins.disable(name=PLUGIN_DNA) + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_dna: PASS\n') + + return + + +################################################################################ +# +# Test Linked Attrs Plugin (4) +# +################################################################################ +def test_linkedattrs(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_LINKED_ATTRS) + inst.plugins.enable(name=PLUGIN_LINKED_ATTRS) + + if args == "restart": + return + + CONFIG_DN = 'cn=config,cn=' + PLUGIN_LINKED_ATTRS + ',cn=plugins,cn=config' + + log.info('Testing ' + PLUGIN_LINKED_ATTRS + '...') + + ############################################################################ + # Configure plugin + 
############################################################################ + + # Add test entries + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((USER2_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc']) + assert False + + # Add the linked attrs config entry + try: + inst.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'linkType': 'directReport', + 'managedType': 'manager' + }))) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to add config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Set "directReport" should add "manager" to the other entry + try: + inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'directReport', USER2_DN)]) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc']) + assert False + + # See if manager was added to the other entry + try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if not entries: + log.fatal('test_linkedattrs: user2 missing "manager" attribute') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) + assert False + + # Remove "directReport" should remove "manager" to the other entry + try: + inst.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'directReport', None)]) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to delete directReport: error ' + e.message['desc']) + assert False + + # See if manager was removed + try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if entries: + log.fatal('test_linkedattrs: user2 "manager" attribute not removed') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Change the config - using linkType "indirectReport" now + ############################################################################ + + try: + inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'linkType', 'indirectReport')]) + except ldap.LDAPError as e: + log.error('test_linkedattrs: Failed to set linkTypee: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Make sure the old linkType(directManager) is not working + try: + inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'directReport', USER2_DN)]) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to add "directReport" to user1: error ' + e.message['desc']) + assert False + + # See if manager was added to the other entry, better not be... 
+ try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if entries: + log.fatal('test_linkedattrs: user2 had "manager" added unexpectedly') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) + assert False + + # Now, set the new linkType "indirectReport", which should add "manager" to the other entry + try: + inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'indirectReport', USER2_DN)]) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc']) + assert False + + # See if manager was added to the other entry - it should be there this time + try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if not entries: + log.fatal('test_linkedattrs: user2 missing "manager"') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) + assert False + + # Removing "indirectReport" should remove "manager" from the other entry + try: + inst.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'indirectReport', None)]) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to delete indirectReport: error ' + e.message['desc']) + assert False + + # See if manager was removed + try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if entries: + log.fatal('test_linkedattrs: user2 "manager" attribute not removed') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test Fixup Task + ############################################################################ + + # Disable the plugin and make some updates that would have triggered the plugin + inst.plugins.disable(name=PLUGIN_LINKED_ATTRS) + + try: + inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'indirectReport', USER2_DN)]) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to add "indirectReport" to user1: error ' + e.message['desc']) + assert False + + # The entry should not have a manager attribute + try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if entries: + log.fatal('test_linkedattrs: user2 incorrectly has a "manager" attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) + assert False + + # Enable the plugin and rerun the fixup task + inst.plugins.enable(name=PLUGIN_LINKED_ATTRS) + + # Add the task again + TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config' + try: + inst.add_s(Entry((TASK_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'basedn': DEFAULT_SUFFIX, + 'filter': 'objectclass=top'}))) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to add task: error ' + e.message['desc']) + assert False + + wait_for_task(inst, TASK_DN) + + # Check if user2 now has a manager attribute + try: + entries = inst.search_s(USER2_DN, ldap.SCOPE_BASE, '(manager=*)') + if not entries: + log.fatal('test_linkedattrs: task failed: user2 missing "manager" attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Search for user2 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin
dependency + ############################################################################ + + test_dependency(inst, PLUGIN_LINKED_ATTRS) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to delete test entry1: ' + e.message['desc']) + assert False + + try: + inst.delete_s(USER2_DN) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to delete test entry2: ' + e.message['desc']) + assert False + + try: + inst.delete_s(CONFIG_DN) + except ldap.LDAPError as e: + log.fatal('test_linkedattrs: Failed to delete plugin config entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_linkedattrs: PASS\n') + return + + +################################################################################ +# +# Test MemberOf Plugin (5) +# +################################################################################ +def test_memberof(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_MEMBER_OF) + inst.plugins.enable(name=PLUGIN_MEMBER_OF) + + if args == "restart": + return + + PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' + SHARED_CONFIG_DN = 'cn=memberOf Config,' + DEFAULT_SUFFIX + + log.info('Testing ' + PLUGIN_MEMBER_OF + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to update config(member): error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add our test entries + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((GROUP_DN, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add group: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((SHARED_CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'memberofgroupattr': 'member', + 'memberofattr': 'memberof' + }))) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to shared config entry: error ' + e.message['desc']) + assert False + + # Check if the user now has a "memberOf" attribute + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if not entries: + log.fatal('test_memberof: user1 missing memberOf') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + # Remove "member" should remove "memberOf" from the entry + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)]) + except ldap.LDAPError as e: + 
log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) + assert False + + # Check that "memberOf" was removed + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if entries: + log.fatal('test_memberof: user1 incorrectly has memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Change the config + ############################################################################ + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + # Check if the user now has a "memberOf" attribute + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if not entries: + log.fatal('test_memberof: user1 missing memberOf') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + # Remove "uniquemember" should remove "memberOf" from the entry + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) + assert False + + # Check that "memberOf" was removed + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if entries: + log.fatal('test_memberof: user1 incorrectly has memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Set the shared config entry and test the plugin + ############################################################################ + + # The shared config entry uses "member" - the above test uses "uniquemember" + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to set plugin area: error ' + e.message['desc']) + assert False + + # Delete the test entries then readd them to start with a clean slate + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete test entry1: ' + e.message['desc']) + assert False + + try: + inst.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete test group: ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((GROUP_DN, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add 
group: error ' + e.message['desc']) + assert False + + # Test the shared config + # Check if the user now has a "memberOf" attribute + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if not entries: + log.fatal('test_memberof: user1 missing memberOf') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + # Remove "member" should remove "memberOf" from the entry + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) + assert False + + # Check that "memberOf" was removed + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if entries: + log.fatal('test_memberof: user1 incorrectly has memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Change the shared config entry to use 'uniquemember' and test the plugin + ############################################################################ + + try: + inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to set shared plugin entry(uniquemember): error ' + + e.message['desc']) + assert False + + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + # Check if the user now has a "memberOf" attribute + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if not entries: + log.fatal('test_memberof: user1 missing memberOf') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + # Remove "uniquemember" should remove "memberOf" from the entry + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'uniquemember', None)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) + assert False + + # Check that "memberOf" was removed + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if entries: + log.fatal('test_memberof: user1 incorrectly has memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Remove shared config from plugin, and retest + ############################################################################ + + # First change the plugin to use member before we move the shared config that uses uniquemember + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'member')]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc']) + assert False + + # Remove shared config from plugin + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)]) + except ldap.LDAPError as e: + 
log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + # Check if the user now has a "memberOf" attribute + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if not entries: + log.fatal('test_memberof: user1 missing memberOf') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + # Remove "uniquemember" should remove "memberOf" from the entry + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_DELETE, 'member', None)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete member: error ' + e.message['desc']) + assert False + + # Check that "memberOf" was removed + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if entries: + log.fatal('test_memberof: user1 incorrectly has memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test Fixup Task + ############################################################################ + + inst.plugins.disable(name=PLUGIN_MEMBER_OF) + + # First change the plugin to use uniquemember + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to update config(uniquemember): error ' + e.message['desc']) + assert False + + # Add uniquemember, should not update USER1 + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'uniquemember', USER1_DN)]) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + # Check for "memberOf" + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if entries: + log.fatal('test_memberof: user1 incorrect has memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + # Enable the plugin, and run the task + inst.plugins.enable(name=PLUGIN_MEMBER_OF) + + TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK + try: + inst.add_s(Entry((TASK_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'basedn': DEFAULT_SUFFIX, + 'filter': 'objectclass=top'}))) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to add task: error ' + e.message['desc']) + assert False + + wait_for_task(inst, TASK_DN) + + # Check for "memberOf" + try: + entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(memberOf=*)') + if not entries: + log.fatal('test_memberof: user1 missing memberOf attr') + assert False + except ldap.LDAPError as e: + log.fatal('test_memberof: Search for user1 failed: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_MEMBER_OF) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete test entry1: ' + e.message['desc']) + assert False + + try: + inst.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('test_memberof: 
Failed to delete test group: ' + e.message['desc']) + assert False + + try: + inst.delete_s(SHARED_CONFIG_DN) + except ldap.LDAPError as e: + log.fatal('test_memberof: Failed to delete shared config entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_memberof: PASS\n') + + return + + +################################################################################ +# +# Test Managed Entry Plugin (6) +# +################################################################################ +def test_mep(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_MANAGED_ENTRY) + inst.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + + if args == "restart": + return + + USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX + MEP_USER_DN = 'cn=user1,ou=groups,' + DEFAULT_SUFFIX + USER_DN2 = 'uid=user 1,ou=people,' + DEFAULT_SUFFIX + MEP_USER_DN2 = 'uid=user 1,ou=groups,' + DEFAULT_SUFFIX + CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' + TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX + TEMPLATE_DN2 = 'cn=MEP Template2,' + DEFAULT_SUFFIX + + log.info('Testing ' + PLUGIN_MANAGED_ENTRY + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add our org units + try: + inst.add_s(Entry((PEOPLE_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((GROUP_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'groups'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add groups org unit: error ' + e.message['desc']) + assert False + + # Add the template entry + try: + inst.add_s(Entry((TEMPLATE_DN, { + 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), + 'cn': 'MEP Template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc']) + assert False + + # Add the config entry + try: + inst.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'originScope': PEOPLE_OU, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': GROUP_OU, + 'managedTemplate': TEMPLATE_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add an entry that meets the MEP scope + try: + inst.add_s(Entry((USER_DN, { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': 'user1', + 'cn': 'user1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add user1: error ' + e.message['desc']) + assert False + + # Check if a managed group entry
was created + try: + inst.search_s(MEP_USER_DN, ldap.SCOPE_BASE, '(objectclass=top)') + except ldap.LDAPError as e: + log.fatal('test_mep: Unable to find MEP entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Change the config + ############################################################################ + + # Add a new template entry + try: + inst.add_s(Entry((TEMPLATE_DN2, { + 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), + 'cn': 'MEP Template2', + 'mepRDNAttr': 'uid', + 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|') + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add template entry2: error ' + e.message['desc']) + assert False + + # Set the new template dn + try: + inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'managedTemplate', TEMPLATE_DN2)]) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to set mep plugin config: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add an entry that meets the MEP scope + try: + inst.add_s(Entry((USER_DN2, { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': 'user 1', + 'cn': 'user 1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/user2' + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to user2: error ' + e.message['desc']) + assert False + + # Check if a managed group entry was created + try: + inst.search_s(MEP_USER_DN2, ldap.SCOPE_BASE, '(objectclass=top)') + except ldap.LDAPError as e: + log.fatal('test_mep: Unable to find MEP entry2: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_MANAGED_ENTRY) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to delete test user1: ' + e.message['desc']) + assert False + + try: + inst.delete_s(USER_DN2) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to delete test user 2: ' + e.message['desc']) + assert False + + try: + inst.delete_s(TEMPLATE_DN) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to delete template1: ' + e.message['desc']) + assert False + + inst.plugins.disable(name=PLUGIN_MANAGED_ENTRY) + + try: + inst.delete_s(TEMPLATE_DN2) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to delete template2: ' + e.message['desc']) + assert False + + try: + inst.delete_s(CONFIG_DN) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to delete config: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_mep: PASS\n') + return + + +################################################################################ +# +# Test Passthru Plugin (7) +# 
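+# Note: this test stands up a second "remote" DirSrv instance (port 33333) and verifies that simple binds against the local server are passed through to it.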
+################################################################################ +def test_passthru(inst, args=None): + # Passthru is a bit picky about the state of the entry - we can't just restart it + if args == "restart": + return + + # stop the plugin + inst.plugins.disable(name=PLUGIN_PASSTHRU) + + PLUGIN_DN = 'cn=' + PLUGIN_PASSTHRU + ',cn=plugins,cn=config' + PASSTHRU_DN = 'uid=admin,dc=pass,dc=thru' + PASSTHRU_DN2 = 'uid=admin2,dc=pass2,dc=thru' + PASS_SUFFIX1 = 'dc=pass,dc=thru' + PASS_SUFFIX2 = 'dc=pass2,dc=thru' + PASS_BE2 = 'PASS2' + + log.info('Testing ' + PLUGIN_PASSTHRU + '...') + + ############################################################################ + # Add a new "remote" instance, and a user for auth + ############################################################################ + + # Create second instance + passthru_inst = DirSrv(verbose=False) + + # Args for the instance + args_instance[SER_HOST] = LOCALHOST + args_instance[SER_PORT] = 33333 + args_instance[SER_SERVERID_PROP] = 'passthru' + args_instance[SER_CREATION_SUFFIX] = PASS_SUFFIX1 + args_passthru_inst = args_instance.copy() + passthru_inst.allocate(args_passthru_inst) + passthru_inst.create() + passthru_inst.open() + + # Create a second backend + passthru_inst.backend.create(PASS_SUFFIX2, {BACKEND_NAME: PASS_BE2}) + passthru_inst.mappingtree.create(PASS_SUFFIX2, bename=PASS_BE2) + + # Create the top of the tree + try: + passthru_inst.add_s(Entry((PASS_SUFFIX2, { + 'objectclass': 'top domain'.split(), + 'dc': 'pass2'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_passthru: Failed to create suffix entry: error ' + e.message['desc']) + passthru_inst.delete() + assert False + + # Add user to suffix1 + try: + passthru_inst.add_s(Entry((PASSTHRU_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'admin', + 'userpassword': 'password' + }))) + except ldap.LDAPError as e: + log.fatal('test_passthru: Failed to admin1: error ' + e.message['desc']) + passthru_inst.delete() + assert False + + # Add user to suffix 2 + try: + passthru_inst.add_s(Entry((PASSTHRU_DN2, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'admin2', + 'userpassword': 'password' + }))) + except ldap.LDAPError as e: + log.fatal('test_passthru: Failed to admin2 : error ' + e.message['desc']) + passthru_inst.delete() + assert False + + ############################################################################ + # Configure and start plugin + ############################################################################ + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'on'), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'ldap://127.0.0.1:33333/dc=pass,dc=thru')]) + except ldap.LDAPError as e: + log.fatal('test_passthru: Failed to set mep plugin config: error ' + e.message['desc']) + passthru_inst.delete() + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # login as user + try: + inst.simple_bind_s(PASSTHRU_DN, "password") + except ldap.LDAPError as e: + log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) + passthru_inst.delete() + assert False + + ############################################################################ + # Change the config + ############################################################################ + + # login as root DN + try: + inst.simple_bind_s(DN_DM, PASSWORD) 
+ except ldap.LDAPError as e: + log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) + passthru_inst.delete() + assert False + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'ldap://127.0.0.1:33333/dc=pass2,dc=thru')]) + except ldap.LDAPError as e: + log.fatal('test_passthru: Failed to set mep plugin config: error ' + e.message['desc']) + passthru_inst.delete() + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # login as user + try: + inst.simple_bind_s(PASSTHRU_DN2, "password") + except ldap.LDAPError as e: + log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) + passthru_inst.delete() + assert False + + # login as root DN + try: + inst.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_passthru: pass through bind failed: ' + e.message['desc']) + passthru_inst.delete() + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_PASSTHRU) + + ############################################################################ + # Cleanup + ############################################################################ + + # remove the passthru instance + passthru_inst.delete() + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_passthru: PASS\n') + + return + + +################################################################################ +# +# Test Referential Integrity Plugin (8) +# +################################################################################ +def test_referint(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + inst.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + + if args == "restart": + return + + log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...') + PLUGIN_DN = 'cn=' + PLUGIN_REFER_INTEGRITY + ',cn=plugins,cn=config' + SHARED_CONFIG_DN = 'cn=RI Config,' + DEFAULT_SUFFIX + + ############################################################################ + # Configure plugin + ############################################################################ + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to configure RI plugin: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Add some users and a group + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add user1: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((USER2_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add user2: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((GROUP_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'group', + 'member': 
USER1_DN, + 'uniquemember': USER2_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add group: error ' + e.message['desc']) + assert False + + # Grab the referint log file from the plugin + + try: + entries = inst.search_s(PLUGIN_DN, ldap.SCOPE_BASE, '(objectclass=top)') + REFERINT_LOGFILE = entries[0].getValue('referint-logfile') + except ldap.LDAPError as e: + log.fatal('test_referint: Unable to search plugin entry: ' + e.message['desc']) + assert False + + # Add shared config entry + try: + inst.add_s(Entry((SHARED_CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'referint-membership-attr': 'member', + 'referint-update-delay': '0', + 'referint-logfile': REFERINT_LOGFILE, + 'referint-logchanges': '0' + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to shared config entry: error ' + e.message['desc']) + assert False + + # Delete a user + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) + assert False + + # Check for integrity + try: + entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')') + if entry: + log.fatal('test_referint: user1 was not removed from group') + assert False + except ldap.LDAPError as e: + log.fatal('test_referint: Unable to search group: ' + e.message['desc']) + assert False + + ############################################################################ + # Change the config + ############################################################################ + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to configure RI plugin: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Delete a user + try: + inst.delete_s(USER2_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) + assert False + + # Check for integrity + try: + entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')') + if entry: + log.fatal('test_referint: user2 was not removed from group') + assert False + except ldap.LDAPError as e: + log.fatal('test_referint: Unable to search group: ' + e.message['desc']) + assert False + + ############################################################################ + # Set the shared config entry and test the plugin + ############################################################################ + + # The shared config entry uses "member" - the above test used "uniquemember" + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, CONFIG_AREA, SHARED_CONFIG_DN)]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to set plugin area: error ' + e.message['desc']) + assert False + + # Delete the group, and readd everything + try: + inst.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete group: ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add user1: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((USER2_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user2' + }))) + except 
ldap.LDAPError as e: + log.fatal('test_referint: Failed to add user2: error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((GROUP_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'group', + 'member': USER1_DN, + 'uniquemember': USER2_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add group: error ' + e.message['desc']) + assert False + + # Delete a user + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) + assert False + + # Check for integrity + try: + entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')') + if entry: + log.fatal('test_referint: user1 was not removed from group') + assert False + except ldap.LDAPError as e: + log.fatal('test_referint: Unable to search group: ' + e.message['desc']) + assert False + + ############################################################################ + # Change the shared config entry to use 'uniquemember' and test the plugin + ############################################################################ + + try: + inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to set shared plugin entry(uniquemember): error ' + + e.message['desc']) + assert False + + # Delete a user + try: + inst.delete_s(USER2_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) + assert False + + # Check for integrity + try: + entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(uniquemember=' + USER2_DN + ')') + if entry: + log.fatal('test_referint: user2 was not removed from group') + assert False + except ldap.LDAPError as e: + log.fatal('test_referint: Unable to search group: ' + e.message['desc']) + assert False + + ############################################################################ + # Remove shared config from plugin, and retest + ############################################################################ + + # First change the plugin to use member before we move the shared config that uses uniquemember + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'member')]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to update config(uniquemember): error ' + e.message['desc']) + assert False + + # Remove shared config from plugin + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, CONFIG_AREA, None)]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + # Add test user + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add user1: error ' + e.message['desc']) + assert False + + # Add user to group + try: + inst.modify_s(GROUP_DN, [(ldap.MOD_REPLACE, 'member', USER1_DN)]) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to add uniquemember: error ' + e.message['desc']) + assert False + + # Delete a user + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete user1: ' + e.message['desc']) + assert False + + # Check for integrity + try: + entry = inst.search_s(GROUP_DN, ldap.SCOPE_BASE, '(member=' + USER1_DN + ')') + if entry: + log.fatal('test_referint: user1 was not removed from group') + assert False + except 
ldap.LDAPError as e: + log.fatal('test_referint: Unable to search group: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_REFER_INTEGRITY) + + ############################################################################ + # Cleanup + ############################################################################ + + try: + inst.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete group: ' + e.message['desc']) + assert False + + try: + inst.delete_s(SHARED_CONFIG_DN) + except ldap.LDAPError as e: + log.fatal('test_referint: Failed to delete shared config entry: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_referint: PASS\n') + + return + + +################################################################################ +# +# Test Retro Changelog Plugin (9) +# +################################################################################ +def test_retrocl(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) + inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + if args == "restart": + return + + log.info('Testing ' + PLUGIN_RETRO_CHANGELOG + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Gather the current change count (it's not 1 once we start the stability tests) + try: + entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') + except ldap.LDAPError as e: + log.fatal('test_retrocl: Failed to get the count: error ' + e.message['desc']) + assert False + + entry_count = len(entry) + + ############################################################################ + # Test plugin + ############################################################################ + + # Add a user + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('test_retrocl: Failed to add user1: error ' + e.message['desc']) + assert False + + # Check we logged this in the retro cl + try: + entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') + if not entry or len(entry) == entry_count: + log.fatal('test_retrocl: changelog not updated') + assert False + except ldap.LDAPError as e: + log.fatal('test_retrocl: Unable to search retro changelog: ' + e.message['desc']) + assert False + + entry_count += 1 + + ############################################################################ + # Change the config - disable plugin + ############################################################################ + + inst.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) + + ############################################################################ + # Test plugin + ############################################################################ + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_retrocl: Failed to delete user1: ' + e.message['desc']) + assert False + + # Check we didn't log this in the retro cl + try: + entry = inst.search_s(RETROCL_SUFFIX,
ldap.SCOPE_SUBTREE, '(changenumber=*)') + if len(entry) != entry_count: + log.fatal('test_retrocl: changelog incorrectly updated - change count: ' + + str(len(entry)) + ' - expected ' + str(entry_count)) + assert False + except ldap.LDAPError as e: + log.fatal('test_retrocl: Unable to search retro changelog: ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + test_dependency(inst, PLUGIN_RETRO_CHANGELOG) + + ############################################################################ + # Cleanup + ############################################################################ + + # None + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_retrocl: PASS\n') + + return + + +################################################################################ +# +# Test Root DN Access Control Plugin (10) +# +################################################################################ +def test_rootdn(inst, args=None): + # stop the plugin, and start it + inst.plugins.disable(name=PLUGIN_ROOTDN_ACCESS) + inst.plugins.enable(name=PLUGIN_ROOTDN_ACCESS) + + if args == "restart": + return + + PLUGIN_DN = 'cn=' + PLUGIN_ROOTDN_ACCESS + ',cn=plugins,cn=config' + + log.info('Testing ' + PLUGIN_ROOTDN_ACCESS + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add a user and an aci to open up cn=config + try: + inst.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1', + 'userpassword': 'password' + }))) + except ldap.LDAPError as e: + log.fatal('test_rootdn: Failed to add user1: error ' + e.message['desc']) + assert False + + # Set an aci so we can modify the plugin after we deny the root dn + ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl ' + + '"all access";allow (all)(userdn="ldap:///anyone");)') + try: + inst.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn: Failed to add aci to config: error ' + e.message['desc']) + assert False + + # Set allowed IP to an unknown host - blocks root dn + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '10.10.10.10')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Bind as Root DN + failed = False + try: + inst.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + failed = True + + if not failed: + log.fatal('test_rootdn: Root DN was incorrectly able to bind') + assert False + + ############################################################################ + # Change the config + ############################################################################ + + # Bind as the user who can make updates to the config + try: + inst.simple_bind_s(USER1_DN, 'password') + except ldap.LDAPError as e: + log.fatal('test_rootdn: failed to bind as user1') + assert False + + # First, test
that invalid plugin changes are rejected + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.ZZZ.12')]) + log.fatal('test_rootdn: Incorrectly allowed to add invalid "rootdn-deny-ip: 12.12.ZZZ.12"') + assert False + except ldap.LDAPError: + pass + + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) + log.fatal('test_rootdn: Incorrectly allowed to add invalid "rootdn-allow-host: host._.com"') + assert False + except ldap.LDAPError: + pass + + # Remove the restriction + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn: Failed to set rootDN plugin config: error ' + e.message['desc']) + assert False + + ############################################################################ + # Test plugin + ############################################################################ + + # Bind as Root DN + failed = False + try: + inst.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + failed = True + + if failed: + log.fatal('test_rootdn: Root DN was not able to bind') + assert False + + ############################################################################ + # Test plugin dependency + ############################################################################ + + test_dependency(inst, PLUGIN_ROOTDN_ACCESS) + + ############################################################################ + # Cleanup - remove ACI from cn=config and test user + ############################################################################ + + try: + inst.modify_s(DN_CONFIG, [(ldap.MOD_DELETE, 'aci', ACI)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn: Failed to add aci to config: error ' + e.message['desc']) + assert False + + try: + inst.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_rootdn: Failed to delete user1: ' + e.message['desc']) + assert False + + ############################################################################ + # Test passed + ############################################################################ + + log.info('test_rootdn: PASS\n') + + return + + +# Array of test functions +func_tests = [test_acctpolicy, test_attruniq, test_automember, test_dna, + test_linkedattrs, test_memberof, test_mep, test_passthru, + test_referint, test_retrocl, test_rootdn] + + +def test_all_plugins(inst, args=None): + for func in func_tests: + func(inst, args) + + return + diff --git a/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py new file mode 100644 index 0000000..920d3f6 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py @@ -0,0 +1,146 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 16, 2014 + +@author: mreynolds +''' +import os +import sys +import time +import ldap +import logging +import pytest +import threading +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +NUM_USERS = 250 +GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX + + +def openConnection(inst): + # Open a new connection to our LDAP server + server = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + server.allocate(args_standalone) + server.open() + + return server + + +# Configure Referential Integrity Plugin for stress test +def configureRI(inst): + inst.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + PLUGIN_DN = 'cn=' + PLUGIN_REFER_INTEGRITY + ',cn=plugins,cn=config' + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('configureRI: Failed to configure RI plugin: error ' + e.message['desc']) + assert False + + +# Configure MemberOf Plugin for stress test +def configureMO(inst): + inst.plugins.enable(name=PLUGIN_MEMBER_OF) + PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' + try: + inst.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('configureMO: Failed to update config(uniquemember): error ' + e.message['desc']) + assert False + + +def cleanup(conn): + try: + conn.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc']) + assert False + + +class DelUsers(threading.Thread): + def __init__(self, inst, rdnval): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.rdnval = rdnval + + def run(self): + conn = openConnection(self.inst) + idx = 0 + log.info('DelUsers - Deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') + while idx < NUM_USERS: + USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('DelUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc']) + assert False + + idx += 1 + + conn.close() + log.info('DelUsers - Finished deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') + + +class AddUsers(threading.Thread): + def __init__(self, inst, rdnval, addToGroup): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.addToGroup = addToGroup + self.rdnval = rdnval + + def run(self): + # Start adding users + conn = openConnection(self.inst) + idx = 0 + + if self.addToGroup: + try: + conn.add_s(Entry((GROUP_DN, + {'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'uid': 'user' + str(idx)}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('AddUsers: failed to add group (' + GROUP_DN + ') error: ' + e.message['desc']) + assert False + + log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') + + while idx < NUM_USERS: + USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), + 'uid': 'user' + str(idx)}))) + except ldap.LDAPError as e: +
log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) + assert False + + if self.addToGroup: + # Add the user to the group + try: + conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', USER_DN)]) + except ldap.LDAPError as e: + log.fatal('AddUsers: Failed to add user ' + USER_DN + ' to group: error ' + e.message['desc']) + assert False + + idx += 1 + + conn.close() + log.info('AddUsers - Finished adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') diff --git a/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py new file mode 100644 index 0000000..c05c402 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py @@ -0,0 +1,493 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 09, 2014 + +@author: mreynolds +''' +import os +import sys +import time +import ldap +import ldap.sasl +import logging +import pytest +import plugin_tests +import stress_tests +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +def repl_fail(replica): + # remove replica instance, and assert failure + replica.delete() + assert False + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance, and remove it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_dynamic_plugins(topology): + """ + Test Dynamic Plugins - exercise each plugin and its main features, while + changing the configuration without restarting the server. + + Need to test: functionality, stability, and stress. These tests need to run + with replication disabled, and with replication set up with a + second instance. Then test that replication is working, and that we have + the same entries on each side. + + Functionality - Make sure that as configuration changes are made they take + effect immediately. Cross-plugin interaction (e.g. automember/memberOf) + needs to be tested, as well as plugin tasks. Need to test plugin + config validation (dependencies, etc). + + Memory Corruption - Restart the plugins many times, and in different orders and test + functionality, and stability. This will exercise the internal + plugin linked lists, dse callbacks, and task handlers.
+ + Stress - Put the server under load that will trigger multiple plugins (MO, RI, DNA, etc.) + Restart various plugins while these operations are going on. Perform this test + 5 times (stress_max_runs). + + """ + + REPLICA_PORT = 33334 + RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))' + master_maxcsn = 0 + replica_maxcsn = 0 + msg = ' (no replication)' + replication_run = False + stress_max_runs = 5 + + # First enable dynamic plugins + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugins: ' + e.message['desc']) + assert False + + # Test that critical plugins can be updated even though the change might not be applied + try: + topology.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')]) + except ldap.LDAPError as e: + log.fatal('Failed to apply change to critical plugin: ' + e.message['desc']) + assert False + + while 1: + # + # First run the tests with replication disabled, then rerun them with replication set up + # + + ############################################################################ + # Test plugin functionality + ############################################################################ + + log.info('####################################################################') + log.info('Testing Dynamic Plugins Functionality' + msg + '...') + log.info('####################################################################\n') + + plugin_tests.test_all_plugins(topology.standalone) + + log.info('####################################################################') + log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.') + log.info('####################################################################\n') + + ############################################################################ + # Test the stability by exercising the internal lists, callbacks, and task handlers + ############################################################################ + + log.info('####################################################################') + log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...') + log.info('####################################################################\n') + prev_plugin_test = None + prev_prev_plugin_test = None + + for plugin_test in plugin_tests.func_tests: + # + # Restart the plugin several times (and prev plugins) - work that linked list + # + plugin_test(topology.standalone, "restart") + + if prev_prev_plugin_test: + prev_prev_plugin_test(topology.standalone, "restart") + + plugin_test(topology.standalone, "restart") + + if prev_plugin_test: + prev_plugin_test(topology.standalone, "restart") + + plugin_test(topology.standalone, "restart") + + # Now run the functional test + plugin_test(topology.standalone) + + # Set the previous tests + if prev_plugin_test: + prev_prev_plugin_test = prev_plugin_test + prev_plugin_test = plugin_test + + log.info('####################################################################') + log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.') + log.info('####################################################################\n') + + ############################################################################ + # Stress two plugins while restarting them, and while restarting other plugins. + # The goal is to not crash, and to have the plugins still work after stressing them.
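+ # Below, three AddUsers threads and three DelUsers threads (from stress_tests.py) add and delete entries while the MemberOf, Referential Integrity, and Linked Attributes plugins are repeatedly disabled and enabled. + # The MO and RI functional tests are then rerun to verify the plugins still work, and the stress entries are cleaned up.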
+ ############################################################################ + + log.info('####################################################################') + log.info('Stressing Dynamic Plugins' + msg + '...') + log.info('####################################################################\n') + + stress_tests.configureMO(topology.standalone) + stress_tests.configureRI(topology.standalone) + + stress_count = 0 + while stress_count < stress_max_runs: + log.info('####################################################################') + log.info('Running stress test' + msg + '. Run (%d/%d)...' % (stress_count + 1, stress_max_runs)) + log.info('####################################################################\n') + + try: + # Launch three new threads to add a bunch of users + add_users = stress_tests.AddUsers(topology.standalone, 'employee', True) + add_users.start() + add_users2 = stress_tests.AddUsers(topology.standalone, 'entry', True) + add_users2.start() + add_users3 = stress_tests.AddUsers(topology.standalone, 'person', True) + add_users3.start() + time.sleep(1) + + # While we are adding users restart the MO plugin and an idle plugin + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) + topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + time.sleep(1) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(2) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) + topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Wait for the 'adding' threads to complete + add_users.join() + add_users2.join() + add_users3.join() + + # Now launch three threads to delete the users + del_users = stress_tests.DelUsers(topology.standalone, 'employee') + del_users.start() + del_users2 = stress_tests.DelUsers(topology.standalone, 'entry') + del_users2.start() + del_users3 = stress_tests.DelUsers(topology.standalone, 'person') + del_users3.start() + time.sleep(1) + + # Restart both the MO, RI plugins during these deletes, and an idle plugin + topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) + topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + 
topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(2) + topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS) + topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + + # Wait for the 'deleting' threads to complete + del_users.join() + del_users2.join() + del_users3.join() + + # Now make sure both the MO and RI plugins still work correctly + plugin_tests.func_tests[8](topology.standalone) # RI plugin + plugin_tests.func_tests[5](topology.standalone) # MO plugin + + # Cleanup the stress tests + stress_tests.cleanup(topology.standalone) + + except: + log.info('Stress test failed!') + repl_fail(replica_inst) + + stress_count += 1 + log.info('####################################################################') + log.info('Successfully Stressed Dynamic Plugins' + msg + + '. Completed (%d/%d)' % (stress_count, stress_max_runs)) + log.info('####################################################################\n') + + if replication_run: + # We're done. + break + else: + # + # Enable replication and run everything one more time + # + log.info('Setting up replication, and rerunning the tests...\n') + + # Create replica instance + replica_inst = DirSrv(verbose=False) + args_instance[SER_HOST] = LOCALHOST + args_instance[SER_PORT] = REPLICA_PORT + args_instance[SER_SERVERID_PROP] = 'replica' + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + + args_replica_inst = args_instance.copy() + replica_inst.allocate(args_replica_inst) + replica_inst.create() + replica_inst.open() + + try: + topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, + role=REPLICAROLE_MASTER, + replicaId=1) + replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX, + role=REPLICAROLE_CONSUMER, + replicaId=65535) + properties = {RA_NAME: r'to_replica', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + + repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, + host=LOCALHOST, + port=REPLICA_PORT, + properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + repl_fail(replica_inst) + + topology.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT) + topology.standalone.waitForReplInit(repl_agreement) + except: + log.info('Failed to setup replication!') + repl_fail(replica_inst) + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync, and remove the instance + ############################################################################ + + log.info('Checking if replication is in sync...') + + try: + # Grab master's max CSN + entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER) + if not entry: + log.error('Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for 
ruv in elements: + if 'replica 1' in ruv: + parts = ruv.split() + if len(parts) == 5: + master_maxcsn = parts[4] + break + else: + log.error('RUV is incomplete') + repl_fail(replica_inst) + if master_maxcsn == 0: + log.error('Failed to find maxcsn on master') + repl_fail(replica_inst) + + except ldap.LDAPError as e: + log.fatal('Unable to search masterfor db tombstone: ' + e.message['desc']) + repl_fail(replica_inst) + + # Loop on the consumer - waiting for it to catch up + count = 0 + insync = False + while count < 10: + try: + # Grab master's max CSN + entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER) + if not entry: + log.error('Failed to find db tombstone entry on consumer') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 1' in ruv: + parts = ruv.split() + if len(parts) == 5: + replica_maxcsn = parts[4] + break + if replica_maxcsn == 0: + log.error('Failed to find maxcsn on consumer') + repl_fail(replica_inst) + except ldap.LDAPError as e: + log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc']) + repl_fail(replica_inst) + + if master_maxcsn == replica_maxcsn: + insync = True + log.info('Replication is in sync.\n') + break + count += 1 + time.sleep(1) + + # Report on replication status + if not insync: + log.error('Consumer not in sync with master!') + repl_fail(replica_inst) + + # + # Verify the databases are identical. There should not be any "user, entry, employee" entries + # + log.info('Checking if the data is the same between the replicas...') + + # Check the master + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(|(uid=person*)(uid=entry*)(uid=employee*))") + if len(entries) > 0: + log.error('Master database has incorrect data set!\n') + repl_fail(replica_inst) + except ldap.LDAPError as e: + log.fatal('Unable to search db on master: ' + e.message['desc']) + repl_fail(replica_inst) + + # Check the consumer + try: + entries = replica_inst.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(|(uid=person*)(uid=entry*)(uid=employee*))") + if len(entries) > 0: + log.error('Consumer database in not consistent with master database') + repl_fail(replica_inst) + except ldap.LDAPError as e: + log.fatal('Unable to search db on consumer: ' + e.message['desc']) + repl_fail(replica_inst) + + log.info('Data is consistent across the replicas.\n') + + log.info('####################################################################') + log.info('Replication consistency test passed') + log.info('####################################################################\n') + + # Remove the replica instance + replica_inst.delete() + + ############################################################################ + # We made it to the end! + ############################################################################ + + log.info('#####################################################') + log.info('#####################################################') + log.info("Dynamic Plugins Testsuite: Completed Successfully!") + log.info('#####################################################') + log.info('#####################################################\n') + + +def test_dynamic_plugins_final(topology): + topology.standalone.delete() + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
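The replication-sync check above extracts the maxcsn by hand from the nsds50ruv values on both the master and the consumer. A small helper is one way to keep that parsing in a single place. This is only a sketch: it assumes the '{replica N url} mincsn maxcsn' whitespace layout that the checks above already rely on, plus lib389's Entry.getValues() accessor.

    import ldap

    def get_ruv_maxcsn(conn, rid_text, suffix, ruv_filter):
        """Return the maxcsn advertised for rid_text (e.g. 'replica 1') in the
        db tombstone entry under suffix, or None if it cannot be found."""
        entries = conn.search_s(suffix, ldap.SCOPE_SUBTREE, ruv_filter)
        if not entries:
            return None
        for ruv in entries[0].getValues('nsds50ruv'):
            if rid_text in ruv:
                parts = ruv.split()
                if len(parts) == 5:
                    return parts[4]
        return None

    # Usage, mirroring the checks above:
    #   master_maxcsn = get_ruv_maxcsn(topology.standalone, 'replica 1',
    #                                  DEFAULT_SUFFIX, RUV_FILTER)
    #   replica_maxcsn = get_ruv_maxcsn(replica_inst, 'replica 1',
    #                                   DEFAULT_SUFFIX, RUV_FILTER)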
+ To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_dynamic_plugins(topo) + test_dynamic_plugins_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py new file mode 100644 index 0000000..d212f6a --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_test.py @@ -0,0 +1,152 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_filter_init(topology): + ''' + Write your testcase here... + ''' + return + + +def test_filter_escaped(topology): + ''' + Test we can search for an '*' in a attribute value. 
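The search in this test matches the embedded '*' with a hand-written pattern ('cn=*\**'). When the value is not known in advance, python-ldap's ldap.filter module can build the escaped assertion; a short sketch, independent of this patch and shown only for reference:

    import ldap.filter

    value = 'test * me'

    # Escape the RFC 4515 special characters before embedding the value.
    escaped = ldap.filter.escape_filter_chars(value)   # expected: 'test \2a me'
    flt = '(cn=%s)' % escaped

    # Or let filter_format substitute and escape in one step.
    flt = ldap.filter.filter_format('(cn=%s)', [value])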
+ ''' + + log.info('Running test_filter_escaped...') + + USER1_DN = 'uid=test_entry,' + DEFAULT_SUFFIX + USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX + + try: + topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'test * me', + 'uid': 'test_entry', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' + + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'test me', + 'uid': 'test_entry2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc']) + assert False + + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\**') + if not entry or len(entry) > 1: + log.fatal('test_filter_escaped: Entry was not found using "cn=*\**"') + assert False + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' % + (USER1_DN, e.message('desc'))) + assert False + + log.info('test_filter_escaped: PASSED') + + +def test_filter_search_original_attrs(topology): + ''' + Search and request attributes with extra characters. The returned entry + should not have these extra characters: "objectclass EXTRA" + ''' + + log.info('Running test_filter_search_original_attrs...') + + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, + 'objectclass=top', ['objectclass-EXTRA']) + if entry[0].hasAttr('objectclass-EXTRA'): + log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute') + assert False + except ldap.LDAPError as e: + log.fatal('test_filter_search_original_attrs: Failed to search suffix(%s), error: %s' % + (DEFAULT_SUFFIX, e.message('desc'))) + assert False + + log.info('test_filter_search_original_attrs: PASSED') + + +def test_filter_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + + test_filter_init(topo) + test_filter_escaped(topo) + test_filter_search_original_attrs(topo) + + test_filter_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/get_effective_rights/ger_test.py b/dirsrvtests/tests/suites/get_effective_rights/ger_test.py new file mode 100644 index 0000000..f87d0a1 --- /dev/null +++ b/dirsrvtests/tests/suites/get_effective_rights/ger_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ger_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_ger_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_ger_final(topology): + topology.standalone.delete() + log.info('ger test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ger_init(topo) + test_ger_(topo) + test_ger_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/ldapi/ldapi_test.py b/dirsrvtests/tests/suites/ldapi/ldapi_test.py new file mode 100644 index 0000000..06589bd --- /dev/null +++ b/dirsrvtests/tests/suites/ldapi/ldapi_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ldapi_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_ldapi_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_ldapi_final(topology): + topology.standalone.delete() + log.info('ldapi test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ldapi_init(topo) + test_ldapi_(topo) + test_ldapi_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py b/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py new file mode 100644 index 0000000..d61898f --- /dev/null +++ b/dirsrvtests/tests/suites/linkedattrs_plugin/linked_attrs_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_linked_attrs_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_linked_attrs_(topology): + ''' + Write a single test here... 
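Since this suite is still a stub, here is a hedged sketch of the setup a Linked Attributes test normally needs first: a config entry under the plugin that ties a link attribute to a managed attribute. The DN layout and the linkType/managedType/linkScope attribute names come from the plugin's documentation, not from this patch, and both attributes must exist in the schema with DN syntax, so treat every name below as an assumption to verify.

    import ldap
    from lib389 import Entry
    from lib389._constants import DEFAULT_SUFFIX, PLUGIN_LINKED_ATTRS

    # Assumes PLUGIN_LINKED_ATTRS is the plugin's cn, giving a config parent of
    # cn=Linked Attributes,cn=plugins,cn=config.
    LINK_CONFIG_DN = 'cn=Manager Link,cn=' + PLUGIN_LINKED_ATTRS + ',cn=plugins,cn=config'

    topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
    topology.standalone.add_s(Entry((LINK_CONFIG_DN, {
        'objectclass': 'top extensibleObject'.split(),
        'cn': 'Manager Link',
        'linkType': 'directReport',    # forward link, assumed to be in the schema
        'managedType': 'manager',      # back link maintained by the plugin
        'linkScope': DEFAULT_SUFFIX,
    })))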
+ ''' + + return + + +def test_linked_attrs_final(topology): + topology.standalone.delete() + log.info('linked_attrs test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_linked_attrs_init(topo) + test_linked_attrs_(topo) + test_linked_attrs_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py b/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py new file mode 100644 index 0000000..6cc95e4 --- /dev/null +++ b/dirsrvtests/tests/suites/mapping_tree/mapping_tree_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_mapping_tree_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_mapping_tree_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_mapping_tree_final(topology): + topology.standalone.delete() + log.info('mapping_tree test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_mapping_tree_init(topo) + test_mapping_tree_(topo) + test_mapping_tree_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py b/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py new file mode 100644 index 0000000..e97c09a --- /dev/null +++ b/dirsrvtests/tests/suites/memberof_plugin/memberof_test.py @@ -0,0 +1,176 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) +installation1_prefix = None + +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + #pass + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_memberof_auto_add_oc(topology): + """ + Test the auto add objectclass feature. The plugin should add a predefined + objectclass that will allow memberOf to be added to an entry. + """ + + # enable dynamic plugins + try: + topology.standalone.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + 'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! 
' + e.message['desc']) + assert False + + # Enable the plugin + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + + # First test invalid value (config validation) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + 'invalid123')]) + log.fatal('Incorrectly added invalid objectclass!') + assert False + except ldap.UNWILLING_TO_PERFORM: + log.info('Correctly rejected invalid objectclass') + except ldap.LDAPError as e: + ldap.error('Unexpected error adding invalid objectclass - error: ' + e.message['desc']) + assert False + + # Add valid objectclass + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + 'inetuser')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) + assert False + + # Add two users + try: + topology.standalone.add_s(Entry((USER1_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user1', + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user2', + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 entry, error: ' + e.message['desc']) + assert False + + # Add a group(that already includes one user + try: + topology.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top', + 'objectclass': 'groupOfNames', + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add group entry, error: ' + e.message['desc']) + assert False + + # Add a user to the group + try: + topology.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + USER2_DN)]) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 to group: error ' + e.message['desc']) + assert False + + log.info('Test complete.') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py new file mode 100644 index 0000000..12599c0 --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py @@ -0,0 +1,138 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + def fin(): + standalone.delete() + sbin_dir = get_sbin_dir(prefix=standalone.prefix) + valgrind_disable(sbin_dir) + request.addfinalizer(fin) + + return TopologyStandalone(standalone) + + +def test_range_search_init(topology): + ''' + Enable retro cl, and valgrind. Since valgrind tests move the ns-slapd binary + around it's important to always "valgrind_disable" before "assert False"ing, + otherwise we leave the wrong ns-slapd in place if there is a failure + ''' + + log.info('Initializing test_range_search...') + + topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # First stop the instance + topology.standalone.stop(timeout=10) + + # Get the sbin directory so we know where to replace 'ns-slapd' + sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix) + + # Enable valgrind + valgrind_enable(sbin_dir) + + # Now start the server with a longer timeout + topology.standalone.start(timeout=60) + + +def test_range_search(topology): + ''' + Add a 100 entries, and run a range search. 
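The init fixture in this suite swaps the real ns-slapd for the valgrind wrapper, which is why the docstrings keep stressing that valgrind must be disabled before any failing assert. One way to make that discipline hard to forget is a try/finally wrapper around the workload. The sketch below is built only from the lib389.utils helpers this file already uses; the explicit import path is an assumption, since the suite itself pulls them in via a wildcard import.

    from lib389.utils import (get_sbin_dir, valgrind_enable, valgrind_disable,
                              valgrind_get_results_file, valgrind_check_file)

    def run_leak_check(topology, workload, check_str, check_fn):
        """Run workload(topology) under valgrind and return True if check_str
        is reported for check_fn, always restoring the original ns-slapd."""
        topology.standalone.stop(timeout=10)
        sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
        valgrind_enable(sbin_dir)
        try:
            topology.standalone.start(timeout=60)
            workload(topology)
            results_file = valgrind_get_results_file(topology.standalone)
            topology.standalone.stop(timeout=30)
            return valgrind_check_file(results_file, check_str, check_fn)
        finally:
            valgrind_disable(sbin_dir)

In this suite the restore happens in the module finalizer instead, which works as long as every early exit still reaches that finalizer.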
When we encounter an error we + still need to disable valgrind before exiting + ''' + + log.info('Running test_range_search...') + + success = True + + # Add 100 test entries + for idx in range(1, 100): + idx = str(idx) + USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX + try: + topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), + 'uid': 'user' + idx}))) + except ldap.LDAPError as e: + log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc']) + success = False + time.sleep(1) + + if success: + # Issue range search + try: + topology.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(changenumber>=74)(changenumber<=84))') + except ldap.LDAPError as e: + log.fatal('test_range_search: Failed to search retro changelog(%s), error: %s' % + (RETROCL_SUFFIX, e.message('desc'))) + success = False + + if success: + # Get the results file, stop the server, and check for the leak + results_file = valgrind_get_results_file(topology.standalone) + topology.standalone.stop(timeout=30) + if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'range_candidates'): + log.fatal('test_range_search: Memory leak is still present!') + assert False + + log.info('test_range_search: PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/mep_plugin/mep_test.py b/dirsrvtests/tests/suites/mep_plugin/mep_test.py new file mode 100644 index 0000000..2bda08d --- /dev/null +++ b/dirsrvtests/tests/suites/mep_plugin/mep_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_mep_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_mep_(topology): + ''' + Write a single test here... 
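This suite is also still a stub; a hedged sketch of the usual Managed Entries setup, a template entry plus a definition entry under the plugin, may be a useful starting point. The plugin config DN and every attribute name below (mepRDNAttr, mepStaticAttr, mepMappedAttr, originScope, originFilter, managedBase, managedTemplate) are taken from the plugin's documentation rather than from this patch, so verify them against your server version.

    from lib389 import Entry
    from lib389._constants import DEFAULT_SUFFIX

    TEMPLATE_DN = 'cn=MEP group template,' + DEFAULT_SUFFIX
    DEFINITION_DN = 'cn=mep definition,cn=Managed Entries,cn=plugins,cn=config'

    # Template describing the entry the plugin should create for each origin entry.
    topology.standalone.add_s(Entry((TEMPLATE_DN, {
        'objectclass': 'top mepTemplateEntry'.split(),
        'cn': 'MEP group template',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: posixGroup',
        'mepMappedAttr': ['cn: $uid', 'gidNumber: $uidNumber'],
    })))

    # Definition tying a scope and filter of origin entries to that template.
    topology.standalone.add_s(Entry((DEFINITION_DN, {
        'objectclass': 'top extensibleObject'.split(),
        'cn': 'mep definition',
        'originScope': 'ou=people,' + DEFAULT_SUFFIX,
        'originFilter': 'objectclass=posixAccount',
        'managedBase': 'ou=groups,' + DEFAULT_SUFFIX,
        'managedTemplate': TEMPLATE_DN,
    })))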
+ ''' + + return + + +def test_mep_final(topology): + topology.standalone.delete() + log.info('mep test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_mep_init(topo) + test_mep_(topo) + test_mep_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py new file mode 100644 index 0000000..d24b3a5 --- /dev/null +++ b/dirsrvtests/tests/suites/monitor/monitor_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_monitor_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_monitor_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_monitor_final(topology): + topology.standalone.delete() + log.info('monitor test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_monitor_init(topo) + test_monitor_(topo) + test_monitor_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py new file mode 100644 index 0000000..54782bc --- /dev/null +++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_paged_results_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_paged_results_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_paged_results_final(topology): + topology.standalone.delete() + log.info('paged_results test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_paged_results_init(topo) + test_paged_results_(topo) + test_paged_results_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py b/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py new file mode 100644 index 0000000..05b55b2 --- /dev/null +++ b/dirsrvtests/tests/suites/pam_passthru_plugin/pam_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_pam_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_pam_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_pam_final(topology): + topology.standalone.delete() + log.info('pam test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_pam_init(topo) + test_pam_(topo) + test_pam_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py b/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py new file mode 100644 index 0000000..1c5d691 --- /dev/null +++ b/dirsrvtests/tests/suites/passthru_plugin/passthru_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_passthru_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_passthru_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_passthru_final(topology): + topology.standalone.delete() + log.info('passthru test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_passthru_init(topo) + test_passthru_(topo) + test_passthru_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py new file mode 100644 index 0000000..3465c2c --- /dev/null +++ b/dirsrvtests/tests/suites/password/password_test.py @@ -0,0 +1,143 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_password_init(topology): + ''' + Do init, if necessary + ''' + + return + + +def test_password_delete_specific_password(topology): + ''' + Delete a specific userpassword, and make sure it is actually deleted from the entry + ''' + + log.info('Running test_password_delete_specific_password...') + + USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX + + # + # Add a test user with a password + # + try: + topology.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_password_delete_specific_password: Failed to add test user ' + + USER_DN + ': error ' + e.message['desc']) + assert False + + # + # Delete the exact password + # + try: + topology.standalone.modify_s(USER_DN, [(ldap.MOD_DELETE, 'userpassword', PASSWORD)]) + except ldap.LDAPError as e: + log.fatal('test_password_delete_specific_password: Failed to delete userpassword: error ' + + e.message['desc']) + assert False + + # + # Check the password is actually deleted + # + try: + entry = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, 'objectclass=top') + if entry[0].hasAttr('userpassword'): + log.fatal('test_password_delete_specific_password: Entry incorrectly still have the userpassword attribute') + assert False + except ldap.LDAPError as e: + log.fatal('test_password_delete_specific_password: Failed to 
search for user(%s), error: %s' % + (USER_DN, e.message('desc'))) + assert False + + # + # Cleanup + # + try: + topology.standalone.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('test_password_delete_specific_password: Failed to delete user(%s), error: %s' % + (USER_DN, e.message('desc'))) + assert False + + log.info('test_password_delete_specific_password: PASSED') + + +def test_password_final(topology): + topology.standalone.delete() + log.info('Password test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_password_init(topo) + test_password_delete_specific_password(topo) + test_password_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py new file mode 100644 index 0000000..2c38756 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py @@ -0,0 +1,447 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None +CONFIG_DN = 'cn=config' +ADMIN_NAME = 'passwd_admin' +ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) +ADMIN2_NAME = 'passwd_admin2' +ADMIN2_DN = 'cn=%s,%s' % (ADMIN2_NAME, SUFFIX) +ADMIN_PWD = 'adminPassword_1' +ADMIN_GROUP_DN = 'cn=password admin group,%s' % (SUFFIX) +ENTRY_NAME = 'Joe Schmo' +ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) +INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_pwdAdmin_init(topology): + ''' + Create our future Password Admin entry, set the password policy, and test + that its working + ''' + + log.info('test_pwdAdmin_init: Creating Password Administator entries...') + + # Add Password Admin 1 + try: + topology.standalone.add_s(Entry((ADMIN_DN, {'objectclass': "top extensibleObject".split(), + 'cn': ADMIN_NAME, + 'userpassword': ADMIN_PWD}))) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin_init: Failed to add test user' + ADMIN_DN + ': error ' + e.message['desc']) + assert False + + # Add Password Admin 2 + try: + topology.standalone.add_s(Entry((ADMIN2_DN, {'objectclass': "top extensibleObject".split(), + 'cn': ADMIN2_NAME, + 'userpassword': ADMIN_PWD}))) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin_init: Failed to add test user ' + ADMIN2_DN + ': error ' + e.message['desc']) + assert False + + # Add Password Admin Group + try: + topology.standalone.add_s(Entry((ADMIN_GROUP_DN, {'objectclass': "top groupOfUNiqueNames".split(), + 'cn': 'password admin group', + 'uniquemember': ADMIN_DN, + 'uniquemember': ADMIN2_DN}))) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin_init: Failed to add group' + ADMIN_GROUP_DN + ': error ' + e.message['desc']) + assert False + + # Configure password policy + log.info('test_pwdAdmin_init: Configuring password policy...') + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'), + (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'), + (ldap.MOD_REPLACE, 'passwordMinCategories', '1'), + (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'), + (ldap.MOD_REPLACE, 'passwordExp', 'on'), + (ldap.MOD_REPLACE, 'passwordMinDigits', '1'), + (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin_init: Failed configure password policy: ' + + e.message['desc']) + assert False + + # + # Add an aci to allow everyone all access (just makes things easier) + # + log.info('Add aci to allow password admin to add/update entries...') + + ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " + ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + try: + topology.standalone.modify_s(SUFFIX, mod) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin_init: Failed to add aci for password admin: ' + + e.message['desc']) + assert False + + # + # Bind as the future Password Admin + # + log.info('test_pwdAdmin_init: Bind as the Password Administator (before activating)...') + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin_init: Failed to bind as the Password Admin: ' + + e.message['desc']) + assert False + + # + # Setup our test entry, and test password policy is working + # + entry = 
Entry(ENTRY_DN) + entry.setValues('objectclass', 'top', 'person') + entry.setValues('sn', ENTRY_NAME) + entry.setValues('cn', ENTRY_NAME) + + # + # Start by attempting to add an entry with an invalid password + # + log.info('test_pwdAdmin_init: Attempt to add entries with invalid passwords, these adds should fail...') + for passwd in INVALID_PWDS: + failed_as_expected = False + entry.setValues('userpassword', passwd) + log.info('test_pwdAdmin_init: Create a regular user entry %s with password (%s)...' % + (ENTRY_DN, passwd)) + try: + topology.standalone.add_s(entry) + except ldap.LDAPError as e: + # We failed as expected + failed_as_expected = True + log.info('test_pwdAdmin_init: Add failed as expected: password (%s) result (%s)' + % (passwd, e.message['desc'])) + + if not failed_as_expected: + log.fatal('test_pwdAdmin_init: We were incorrectly able to add an entry ' + + 'with an invalid password (%s)' % (passwd)) + assert False + + +def test_pwdAdmin(topology): + ''' + Test that password administrators/root DN can + bypass password syntax/policy. + + We need to test how passwords are modified in + existing entries, and when adding new entries. + + Create the Password Admin entry, but do not set + it as an admin yet. Use the entry to verify invalid + passwords are caught. Then activate the password + admin and make sure it can bypass password policy. + ''' + + # + # Now activate a password administator, bind as root dn to do the config + # update, then rebind as the password admin + # + log.info('test_pwdAdmin: Activate the Password Administator...') + + # + # Setup our test entry, and test password policy is working + # + entry = Entry(ENTRY_DN) + entry.setValues('objectclass', 'top', 'person') + entry.setValues('sn', ENTRY_NAME) + entry.setValues('cn', ENTRY_NAME) + + # Bind as Root DN + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + + e.message['desc']) + assert False + + # Set the password admin + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to add password admin to config: ' + + e.message['desc']) + assert False + + # Bind as Password Admin + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' + + e.message['desc']) + assert False + + # + # Start adding entries with invalid passwords, delete the entry after each pass. + # + for passwd in INVALID_PWDS: + entry.setValues('userpassword', passwd) + log.info('test_pwdAdmin: Create a regular user entry %s with password (%s)...' 
% + (ENTRY_DN, passwd)) + try: + topology.standalone.add_s(entry) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to add entry with password (%s) result (%s)' + % (passwd, e.message['desc'])) + assert False + + log.info('test_pwdAdmin: Successfully added entry (%s)' % ENTRY_DN) + + # Delete entry for the next pass + try: + topology.standalone.delete_s(ENTRY_DN) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to delete entry: %s' % + (e.message['desc'])) + assert False + + # + # Add the entry for the next round of testing (modify password) + # + entry.setValues('userpassword', ADMIN_PWD) + try: + topology.standalone.add_s(entry) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to add entry with valid password (%s) result (%s)' % + (passwd, e.message['desc'])) + assert False + + # + # Deactivate the password admin and make sure invalid password updates fail + # + log.info('test_pwdAdmin: Deactivate Password Administator and ' + + 'try invalid password updates...') + + # Bind as root DN + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + + e.message['desc']) + assert False + + # Remove password admin + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to remove password admin from config: ' + + e.message['desc']) + assert False + + # Bind as Password Admin (who is no longer an admin) + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' + + e.message['desc']) + assert False + + # + # Make invalid password updates that should fail + # + for passwd in INVALID_PWDS: + failed_as_expected = False + entry.setValues('userpassword', passwd) + try: + topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) + except ldap.LDAPError as e: + # We failed as expected + failed_as_expected = True + log.info('test_pwdAdmin: Password update failed as expected: password (%s) result (%s)' + % (passwd, e.message['desc'])) + + if not failed_as_expected: + log.fatal('test_pwdAdmin: We were incorrectly able to add an invalid password (%s)' + % (passwd)) + assert False + + # + # Now activate a password administator + # + log.info('test_pwdAdmin: Activate Password Administator and try updates again...') + + # Bind as root DN to make the update + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc']) + assert False + + # Update config - set the password admin + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to add password admin to config: ' + + e.message['desc']) + assert False + + # Bind as Password Admin + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to bind as the Password Admin: ' + + e.message['desc']) + assert False + + # + # Make the same password updates, but this time they should succeed + # + for passwd in INVALID_PWDS: + try: + topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Password update failed unexpectedly: 
password (%s) result (%s)' + % (passwd, e.message['desc'])) + assert False + log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd) + + # + # Test Password Admin Group + # + log.info('test_pwdAdmin: Testing password admin group...') + + # Bind as root DN to make the update + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc']) + assert False + + # Update config - set the password admin group + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_GROUP_DN)]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to add password admin to config: ' + + e.message['desc']) + assert False + + # Bind as admin2 + try: + topology.standalone.simple_bind_s(ADMIN2_DN, ADMIN_PWD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Failed to bind as the Password Admin2: ' + + e.message['desc']) + assert False + + # Make some invalid password updates, but they should succeed + for passwd in INVALID_PWDS: + try: + topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Password update failed unexpectedly: password (%s) result (%s)' + % (passwd, e.message['desc'])) + assert False + log.info('test_pwdAdmin: Password update succeeded (%s)' % passwd) + + # Cleanup - bind as Root DN for the other tests + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_pwdAdmin: Root DN failed to authenticate: ' + e.message['desc']) + assert False + + +def test_pwdAdmin_config_validation(topology): + ''' + Test config validation: + + - Test adding multiple passwordAdminDN attributes + - Test adding invalid values(non-DN's) + ''' + # Add mulitple attributes - one already eists so just try and add as second one + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', ENTRY_DN)]) + log.fatal('test_pwdAdmin_config_validation: Incorrectly was able to add two config attributes') + assert False + except ldap.LDAPError as e: + log.info('test_pwdAdmin_config_validation: Failed as expected: ' + + e.message['desc']) + + # Attempt to set invalid DN + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_ADD, 'passwordAdminDN', 'ZZZZZ')]) + log.fatal('test_pwdAdmin_config_validation: Incorrectly was able to add invalid DN') + assert False + except ldap.LDAPError as e: + log.info('test_pwdAdmin_config_validation: Failed as expected: ' + + e.message['desc']) + + +def test_pwdAdmin_final(topology): + topology.standalone.delete() + log.info('pwdAdmin test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_pwdAdmin_init(topo) + test_pwdAdmin(topo) + test_pwdAdmin_config_validation(topo) + test_pwdAdmin_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_test.py new file mode 100644 index 0000000..9ceb62c --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_pwdPolicy_init(topology): + ''' + Init the test suite (if necessary) + ''' + return + + +def test_pwdPolicy_final(topology): + topology.standalone.delete() + log.info('Password Policy test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_pwdPolicy_init(topo) + test_pwdPolicy_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py b/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py new file mode 100644 index 0000000..c50702b --- /dev/null +++ b/dirsrvtests/tests/suites/posix_winsync_plugin/posix_winsync_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
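+    # The standard lib389 standalone fixture pattern used throughout these
+    # suites: fill in the shared args_instance dict (host, port, server id,
+    # creation suffix), allocate() a DirSrv object from it, remove any
+    # leftover instance with the same server id, then create() and open()
+    # a fresh one for this module.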
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_posix_winsync_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_posix_winsync_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_posix_winsync_final(topology): + topology.standalone.delete() + log.info('posix_winsync test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_posix_winsync_init(topo) + test_posix_winsync_(topo) + test_posix_winsync_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/psearch/psearch_test.py b/dirsrvtests/tests/suites/psearch/psearch_test.py new file mode 100644 index 0000000..d68f06d --- /dev/null +++ b/dirsrvtests/tests/suites/psearch/psearch_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_psearch_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_psearch_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_psearch_final(topology): + topology.standalone.delete() + log.info('psearch test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_psearch_init(topo) + test_psearch_(topo) + test_psearch_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/referint_plugin/referint_test.py b/dirsrvtests/tests/suites/referint_plugin/referint_test.py new file mode 100644 index 0000000..9a96ba6 --- /dev/null +++ b/dirsrvtests/tests/suites/referint_plugin/referint_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_referint_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_referint_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_referint_final(topology): + topology.standalone.delete() + log.info('referint test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_referint_init(topo) + test_referint_(topo) + test_referint_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py new file mode 100644 index 0000000..373269d --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py @@ -0,0 +1,1494 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import threading +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class AddUsers(threading.Thread): + def __init__(self, inst, num_users): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.num_users = num_users + + def openConnection(self, inst): + # Open a new connection to our LDAP server + server = DirSrv(verbose=False) + args_instance[SER_HOST] = inst.host + args_instance[SER_PORT] = inst.port + args_instance[SER_SERVERID_PROP] = inst.serverid + args_standalone = args_instance.copy() + server.allocate(args_standalone) + server.open() + return server + + def run(self): + # Start adding users + conn = self.openConnection(self.inst) + idx = 0 + + while idx < self.num_users: + USER_DN = 'uid=' + self.inst.serverid + '_' + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(), + 'uid': 'user' + str(idx)}))) + except ldap.UNWILLING_TO_PERFORM: + # One of the masters was probably put into read only mode - just break out + break + except ldap.LDAPError as e: + log.error('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) + assert False + idx += 1 + + conn.close() + + +class TopologyReplication(object): + def __init__(self, master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + master3.open() + self.master3 = master3 + master4.open() + self.master4 = master4 + + # Store the agreement dn's for future initializations + self.m1_m2_agmt = m1_m2_agmt + self.m1_m3_agmt = m1_m3_agmt + self.m1_m4_agmt = m1_m4_agmt + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master1.log = log + + # Creating master 2... + master2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Creating master 3... 
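+    # (Same creation pattern as masters 1 and 2 above.)  Each supplier is
+    # enabled with its own replica id, REPLICAID_MASTER_1 .. REPLICAID_MASTER_4;
+    # replica id 4 is the one the cleanAllRUV tests below disable and clean
+    # out of the remaining masters' RUVs.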
+ master3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_3 + args_instance[SER_PORT] = PORT_MASTER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master3.allocate(args_master) + instance_master3 = master3.exists() + if instance_master3: + master3.delete() + master3.create() + master3.open() + master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_3) + + # Creating master 4... + master4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_4 + args_instance[SER_PORT] = PORT_MASTER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master4.allocate(args_master) + instance_master4 = master4.exists() + if instance_master4: + master4.delete() + master4.create() + master4.open() + master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 1 to master 3 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) + if not m1_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m3_agmt) + + # Creating agreement from master 1 to master 4 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties) + if not m1_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m4_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Creating agreement from master 2 to master 3 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) + if not m2_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m3_agmt) + + # Creating agreement from master 2 to master 4 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties) + if not m2_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m4_agmt) + + # Creating agreement from master 3 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m3_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m1_agmt) + + # Creating agreement from master 3 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m3_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m2_agmt) + + # Creating agreement from master 3 to master 4 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host, port=master4.port, properties=properties) + if not m3_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m4_agmt) + + # Creating agreement from master 4 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m4_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m1_agmt) + + # Creating agreement from master 4 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: 
defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m4_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m2_agmt) + + # Creating agreement from master 4 to master 3 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host, port=master3.port, properties=properties) + if not m4_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m3_agmt) + + # Allow the replicas to get situated with the new agreements + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) + master1.waitForReplInit(m1_m3_agmt) + master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4) + master1.waitForReplInit(m1_m4_agmt) + + # Check replication is working... + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + return TopologyReplication(master1, master2, master3, master4, m1_m2_agmt, m1_m3_agmt, m1_m4_agmt) + + +def restore_master4(topology): + ''' + In our tests will always be removing master 4, so we need a common + way to restore it for another test + ''' + + log.info('Restoring master 4...') + + # Enable replication on master 4 + topology.master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_4) + + # + # Create agreements from master 4 -> m1, m2 ,m3 + # + # Creating agreement from master 4 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m1_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master1.host, + port=topology.master1.port, properties=properties) + if not m4_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m1_agmt) + + # Creating agreement from master 4 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m2_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master2.host, + port=topology.master2.port, properties=properties) + if not m4_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m2_agmt) + + # Creating agreement from master 4 to master 3 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], 
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m3_agmt = topology.master4.agreement.create(suffix=SUFFIX, host=topology.master3.host, + port=topology.master3.port, properties=properties) + if not m4_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m3_agmt) + + # + # Create agreements from m1, m2, m3 to master 4 + # + # Creating agreement from master 1 to master 4 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m4_agmt = topology.master1.agreement.create(suffix=SUFFIX, host=topology.master4.host, + port=topology.master4.port, properties=properties) + if not m1_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m4_agmt) + + # Creating agreement from master 2 to master 4 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m4_agmt = topology.master2.agreement.create(suffix=SUFFIX, host=topology.master4.host, + port=topology.master4.port, properties=properties) + if not m2_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m4_agmt) + + # Creating agreement from master 3 to master 4 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m4_agmt = topology.master3.agreement.create(suffix=SUFFIX, host=topology.master4.host, + port=topology.master4.port, properties=properties) + if not m3_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m4_agmt) + + # + # Restart the other servers - this allows the rid(for master4) to be used again/valid + # + topology.master1.restart(timeout=30) + topology.master2.restart(timeout=30) + topology.master3.restart(timeout=30) + topology.master4.restart(timeout=30) + + # + # Initialize the agreements + # + topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + topology.master1.waitForReplInit(topology.m1_m2_agmt) + topology.master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) + topology.master1.waitForReplInit(topology.m1_m3_agmt) + topology.master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4) + topology.master1.waitForReplInit(topology.m1_m4_agmt) + + # + # Test Replication is working + # + # Check replication is working with previous working master(m1 -> m2) + if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2): + log.info('Replication is working m1 -> m2.') + else: + log.fatal('restore_master4: Replication is not working from m1 -> m2.') + assert False + + # Check replication is working from master 1 to master 4... 
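+    # Master 4 was just re-enabled and re-initialized, so verify traffic in
+    # both directions: inbound (m1 -> m4) below, then outbound (m4 -> m1).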
+ if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4): + log.info('Replication is working m1 -> m4.') + else: + log.fatal('restore_master4: Replication is not working from m1 -> m4.') + assert False + + # Check replication is working from master 4 to master1... + if topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1): + log.info('Replication is working m4 -> m1.') + else: + log.fatal('restore_master4: Replication is not working from m4 -> 1.') + assert False + + log.info('Master 4 has been successfully restored.') + + +def test_cleanallruv_init(topology): + ''' + Make updates on each master to make sure we have the all master RUVs on + each master. + ''' + + log.info('Initializing cleanAllRUV test suite...') + + # Master 1 + if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master2): + log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 2.') + assert False + + if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master3): + log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 3.') + assert False + + if not topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4): + log.fatal('test_cleanallruv_init: Replication is not working between master 1 and master 4.') + assert False + + # Master 2 + if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master1): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.') + assert False + + if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master3): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.') + assert False + + if not topology.master2.testReplication(DEFAULT_SUFFIX, topology.master4): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.') + assert False + + # Master 3 + if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master1): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.') + assert False + + if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master2): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 2.') + assert False + + if not topology.master3.testReplication(DEFAULT_SUFFIX, topology.master4): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 4.') + assert False + + # Master 4 + if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 1.') + assert False + + if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master2): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 2.') + assert False + + if not topology.master4.testReplication(DEFAULT_SUFFIX, topology.master3): + log.fatal('test_cleanallruv_init: Replication is not working between master 2 and master 3.') + assert False + + log.info('Initialized cleanAllRUV test suite.') + + +def test_cleanallruv_clean(topology): + ''' + Disable a master, remove agreements to that master, and clean the RUVs on + the remaining replicas + ''' + + log.info('Running test_cleanallruv_clean...') + + # Disable master 4 + log.info('test_cleanallruv_clean: disable master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('error!') + assert False + + # Remove the 
agreements from the other masters that point to master 4 + log.info('test_cleanallruv_clean: remove all the agreements to master 4...') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Run the task + log.info('test_cleanallruv_clean: run the cleanAllRUV task...') + try: + topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', + args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_clean: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Check the other master's RUV for 'replica 4' + log.info('test_cleanallruv_clean: check all the masters have been cleaned...') + clean = False + count = 0 + while not clean and count < 5: + clean = True + + # Check master 1 + try: + entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean: Master 1 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean: Master 1 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean: Unable to search master 1 for db tombstone: ' + e.message['desc']) + + # Check master 2 + try: + entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean: Master 2 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean: Master 2 is cleaned.') + except ldap.LDAPError as e: + log.fatal('Unable to search master 2 for db tombstone: ' + e.message['desc']) + + # Check master 3 + try: + entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean: Master 3 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean: Master 3 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean: Unable to search master 3 for db tombstone: ' + e.message['desc']) + + # Sleep a bit and give it chance to clean up... 
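+        # Poll up to 5 times (count < 5), pausing 5 seconds between passes;
+        # the loop ends early as soon as no master still shows 'replica 4'
+        # in its nsds50ruv values.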
+ time.sleep(5) + count += 1 + + if not clean: + log.fatal('test_cleanallruv_clean: Failed to clean replicas') + assert False + + log.info('Allow cleanallruv threads to finish...') + time.sleep(30) + + log.info('test_cleanallruv_clean PASSED, restoring master 4...') + + # + # Cleanup - restore master 4 + # + restore_master4(topology) + + +def test_cleanallruv_clean_restart(topology): + ''' + Test that if a master istopped during the clean process, that it + resumes and finishes when its started. + ''' + + log.info('Running test_cleanallruv_clean_restart...') + + # Disable master 4 + log.info('test_cleanallruv_clean_restart: disable master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('error!') + assert False + + # Remove the agreements from the other masters that point to master 4 + log.info('test_cleanallruv_clean: remove all the agreements to master 4...') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_restart: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Stop master 3 to keep the task running, so we can stop master 1... + topology.master3.stop(timeout=30) + + # Run the task + log.info('test_cleanallruv_clean_restart: run the cleanAllRUV task...') + try: + topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', + args={TASK_WAIT: False}) + except ValueError as e: + log.fatal('test_cleanallruv_clean_restart: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Sleep a bit, then stop master 1 + time.sleep(3) + topology.master1.stop(timeout=30) + + # Now start master 3 & 1, and make sure we didn't crash + topology.master3.start(timeout=30) + if topology.master3.detectDisorderlyShutdown(): + log.fatal('test_cleanallruv_clean_restart: Master 3 previously crashed!') + assert False + + topology.master1.start(timeout=30) + if topology.master1.detectDisorderlyShutdown(): + log.fatal('test_cleanallruv_clean_restart: Master 1 previously crashed!') + assert False + + # Wait a little for agmts/cleanallruv to wake up + time.sleep(5) + + # Check the other master's RUV for 'replica 4' + log.info('test_cleanallruv_clean_restart: check all the masters have been cleaned...') + clean = False + count = 0 + while not clean and count < 10: + clean = True + + # Check master 1 + try: + entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean_restart: Master 1 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean_restart: Master 1 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_restart: Unable to search master 1 for db tombstone: ' + + e.message['desc']) + + # Check 
master 2 + try: + entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean_restart: Master 2 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean_restart: Master 2 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_restart: Unable to search master 2 for db tombstone: ' + + e.message['desc']) + + # Check master 3 + try: + entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean_restart: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean_restart: Master 3 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean_restart: Master 3 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_restart: Unable to search master 3 for db tombstone: ' + + e.message['desc']) + + # Sleep a bit and give it chance to clean up... + time.sleep(5) + count += 1 + + if not clean: + log.fatal('Failed to clean replicas') + assert False + + log.info('Allow cleanallruv threads to finish...') + time.sleep(30) + + log.info('test_cleanallruv_clean_restart PASSED, restoring master 4...') + + # + # Cleanup - restore master 4 + # + restore_master4(topology) + + +def test_cleanallruv_clean_force(topology): + ''' + Disable a master, remove agreements to that master, and clean the RUVs on + the remaining replicas + ''' + + log.info('Running test_cleanallruv_clean_force...') + + # Stop master 3, while we update master 4, so that 3 is behind the other masters + topology.master3.stop(timeout=10) + + # Add a bunch of updates to master 4 + m4_add_users = AddUsers(topology.master4, 1500) + m4_add_users.start() + m4_add_users.join() + + # Disable master 4 + log.info('test_cleanallruv_clean_force: disable master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('error!') + assert False + + # Start master 3, it should be out of sync with the other replicas... 
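+    # Master 3 was down while the 1500 entries were added to master 4, so its
+    # view of replica 4's RUV lags behind masters 1 and 2.  That gap is why the
+    # cleanAllRUV task below has to be run with force=True.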
+ topology.master3.start(timeout=10) + + # Remove the agreements from the other masters that point to master 4 + log.info('test_cleanallruv_clean_force: remove all the agreements to master 4...') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_force: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Run the task, use "force" because master 3 is not in sync with the other replicas + # in regards to the replica 4 RUV + log.info('test_cleanallruv_clean_force: run the cleanAllRUV task...') + try: + topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', + force=True, args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_clean_force: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Check the other master's RUV for 'replica 4' + log.info('test_cleanallruv_clean_force: check all the masters have been cleaned...') + clean = False + count = 0 + while not clean and count < 5: + clean = True + + # Check master 1 + try: + entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean_force: Master 1 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean_force: Master 1 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_force: Unable to search master 1 for db tombstone: ' + + e.message['desc']) + + # Check master 2 + try: + entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean_force: Master 1 not cleaned!') + clean = False + if clean: + log.info('Master 2 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_force: Unable to search master 2 for db tombstone: ' + + e.message['desc']) + + # Check master 3 + try: + entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_clean_force: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_clean_force: Master 3 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_clean_force: Master 3 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_clean_force: Unable to search master 3 for db tombstone: ' + + e.message['desc']) + + # Sleep a bit and give it 
chance to clean up... + time.sleep(5) + count += 1 + + if not clean: + log.fatal('test_cleanallruv_clean_force: Failed to clean replicas') + assert False + + log.info('test_cleanallruv_clean_force: Allow cleanallruv threads to finish') + time.sleep(30) + + log.info('test_cleanallruv_clean_force PASSED, restoring master 4...') + + # + # Cleanup - restore master 4 + # + restore_master4(topology) + + +def test_cleanallruv_abort(topology): + ''' + Test the abort task. + + DIsable master 4 + Stop master 2 so that it can not be cleaned + Run the clean task + Wait a bit + Abort the task + Verify task is aborted + ''' + + log.info('Running test_cleanallruv_abort...') + + # Disable master 4 + log.info('test_cleanallruv_abort: disable replication on master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('test_cleanallruv_abort: failed to disable replication') + assert False + + # Remove the agreements from the other masters that point to master 4 + log.info('test_cleanallruv_abort: remove all the agreements to master 4...)') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Stop master 2 + log.info('test_cleanallruv_abort: stop master 2 to freeze the cleanAllRUV task...') + topology.master2.stop(timeout=10) + + # Run the task + log.info('test_cleanallruv_abort: add the cleanAllRUV task...') + try: + (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', args={TASK_WAIT: False}) + except ValueError as e: + log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Wait a bit + time.sleep(10) + + # Abort the task + log.info('test_cleanallruv_abort: abort the cleanAllRUV task...') + try: + topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', + args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_abort: Problem running abortCleanAllRuv task: ' + + e.message('desc')) + assert False + + # Check master 1 does not have the clean task running + log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...') + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + while not done and count < 5: + entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist) + if not entry or entry.nsTaskExitCode: + done = True + break + time.sleep(1) + count += 1 + if not done: + log.fatal('test_cleanallruv_abort: CleanAllRUV task was not aborted') + assert False + + # Start master 2 + log.info('test_cleanallruv_abort: start master 2 to begin the restore process...') + topology.master2.start(timeout=10) + + # + # Now run the clean task task again to we can properly restore master 4 + # + log.info('test_cleanallruv_abort: run cleanAllRUV task so we can properly restore master 4...') + try: + 
topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_abort: Problem running cleanAllRuv task: ' + e.message('desc')) + assert False + + log.info('test_cleanallruv_abort PASSED, restoring master 4...') + + # + # Cleanup - Restore master 4 + # + restore_master4(topology) + + +def test_cleanallruv_abort_restart(topology): + ''' + Test the abort task can handle a restart, and then resume + ''' + + log.info('Running test_cleanallruv_abort_restart...') + + # Disable master 4 + log.info('test_cleanallruv_abort_restart: disable replication on master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('error!') + assert False + + # Remove the agreements from the other masters that point to master 4 + log.info('test_cleanallruv_abort_restart: remove all the agreements to master 4...)') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort_restart: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Stop master 3 + log.info('test_cleanallruv_abort_restart: stop master 3 to freeze the cleanAllRUV task...') + topology.master3.stop(timeout=10) + + # Run the task + log.info('test_cleanallruv_abort_restart: add the cleanAllRUV task...') + try: + (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', args={TASK_WAIT: False}) + except ValueError as e: + log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Wait a bit + time.sleep(5) + + # Abort the task + log.info('test_cleanallruv_abort_restart: abort the cleanAllRUV task...') + try: + topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', + certify=True, args={TASK_WAIT: False}) + except ValueError as e: + log.fatal('test_cleanallruv_abort_restart: Problem running test_cleanallruv_abort_restart task: ' + + e.message('desc')) + assert False + + # Allow task to run for a bit: + time.sleep(5) + + # Check master 1 does not have the clean task running + log.info('test_cleanallruv_abort: check master 1 no longer has a cleanAllRUV task...') + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + while not done and count < 10: + entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist) + if not entry or entry.nsTaskExitCode: + done = True + break + time.sleep(1) + count += 1 + if not done: + log.fatal('test_cleanallruv_abort_restart: CleanAllRUV task was not aborted') + assert False + + # Now restart master 1, and make sure the abort process completes + topology.master1.restart(timeout=30) + if topology.master1.detectDisorderlyShutdown(): + log.fatal('test_cleanallruv_abort_restart: Master 1 previously crashed!') + assert False + + # Start master 3 + topology.master3.start(timeout=10) + + # Check master 1 tried to run abort 
task. We expect the abort task to be aborted. + if not topology.master1.searchErrorsLog('Aborting abort task'): + log.fatal('test_cleanallruv_abort_restart: Abort task did not restart') + assert False + + # + # Now run the clean task task again to we can properly restore master 4 + # + log.info('test_cleanallruv_abort_restart: run cleanAllRUV task so we can properly restore master 4...') + try: + topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_abort_restart: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + log.info('test_cleanallruv_abort_restart PASSED, restoring master 4...') + + # + # Cleanup - Restore master 4 + # + restore_master4(topology) + + +def test_cleanallruv_abort_certify(topology): + ''' + Test the abort task. + + Disable master 4 + Stop master 2 so that it can not be cleaned + Run the clean task + Wait a bit + Abort the task + Verify task is aborted + ''' + + log.info('Running test_cleanallruv_abort_certify...') + + # Disable master 4 + log.info('test_cleanallruv_abort_certify: disable replication on master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('error!') + assert False + + # Remove the agreements from the other masters that point to master 4 + log.info('test_cleanallruv_abort_certify: remove all the agreements to master 4...)') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_abort_certify: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Stop master 2 + log.info('test_cleanallruv_abort_certify: stop master 2 to freeze the cleanAllRUV task...') + topology.master2.stop(timeout=10) + + # Run the task + log.info('test_cleanallruv_abort_certify: add the cleanAllRUV task...') + try: + (clean_task_dn, rc) = topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', args={TASK_WAIT: False}) + except ValueError as e: + log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Abort the task + log.info('test_cleanallruv_abort_certify: abort the cleanAllRUV task...') + try: + (abort_task_dn, rc) = topology.master1.tasks.abortCleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', certify=True, args={TASK_WAIT: False}) + except ValueError as e: + log.fatal('test_cleanallruv_abort_certify: Problem running abortCleanAllRuv task: ' + + e.message('desc')) + assert False + + # Wait a while and make sure the abort task is still running + log.info('test_cleanallruv_abort_certify: sleep for 10 seconds') + time.sleep(10) + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + entry = topology.master1.getEntry(abort_task_dn, attrlist=attrlist) + if not entry or entry.nsTaskExitCode: + log.fatal('test_cleanallruv_abort_certify: abort task incorrectly finished') + assert False + + # Now start master 2 
so it can be aborted + log.info('test_cleanallruv_abort_certify: start master 2 to allow the abort task to finish...') + topology.master2.start(timeout=10) + + # Wait for the abort task to stop + done = False + count = 0 + while not done and count < 60: + entry = topology.master1.getEntry(abort_task_dn, attrlist=attrlist) + if not entry or entry.nsTaskExitCode: + done = True + break + time.sleep(1) + count += 1 + if not done: + log.fatal('test_cleanallruv_abort_certify: The abort CleanAllRUV task was not aborted') + assert False + + # Check master 1 does not have the clean task running + log.info('test_cleanallruv_abort_certify: check master 1 no longer has a cleanAllRUV task...') + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + while not done and count < 5: + entry = topology.master1.getEntry(clean_task_dn, attrlist=attrlist) + if not entry or entry.nsTaskExitCode: + done = True + break + time.sleep(1) + count += 1 + if not done: + log.fatal('test_cleanallruv_abort_certify: CleanAllRUV task was not aborted') + assert False + + # Start master 2 + log.info('test_cleanallruv_abort_certify: start master 2 to begin the restore process...') + topology.master2.start(timeout=10) + + # + # Now run the clean task task again to we can properly restore master 4 + # + log.info('test_cleanallruv_abort_certify: run cleanAllRUV task so we can properly restore master 4...') + try: + topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, + replicaid='4', args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_abort_certify: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + log.info('test_cleanallruv_abort_certify PASSED, restoring master 4...') + + # + # Cleanup - Restore master 4 + # + restore_master4(topology) + + +def test_cleanallruv_stress_clean(topology): + ''' + Put each server(m1 - m4) under stress, and perform the entire clean process + ''' + log.info('Running test_cleanallruv_stress_clean...') + log.info('test_cleanallruv_stress_clean: put all the masters under load...') + + # Put all the masters under load + m1_add_users = AddUsers(topology.master1, 4000) + m1_add_users.start() + m2_add_users = AddUsers(topology.master2, 4000) + m2_add_users.start() + m3_add_users = AddUsers(topology.master3, 4000) + m3_add_users.start() + m4_add_users = AddUsers(topology.master4, 4000) + m4_add_users.start() + + # Allow sometime to get replication flowing in all directions + log.info('test_cleanallruv_stress_clean: allow some time for replication to get flowing...') + time.sleep(5) + + # Put master 4 into read only mode + log.info('test_cleanallruv_stress_clean: put master 4 into read-only mode...') + try: + topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'on')]) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' + + e.message['desc']) + assert False + + # We need to wait for master 4 to push its changes out + log.info('test_cleanallruv_stress_clean: allow some time for master 4 to push changes out (30 seconds)...') + time.sleep(30) + + # Disable master 4 + log.info('test_cleanallruv_stress_clean: disable replication on master 4...') + try: + topology.master4.replica.disableReplication(DEFAULT_SUFFIX) + except: + log.fatal('test_cleanallruv_stress_clean: failed to diable replication') + assert False + + # Remove the agreements from the other masters that point to master 4 
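The per-master agreement deletions that follow (and the identical try/except blocks in the earlier tests of this file) could be expressed once. The helper below is only an illustrative sketch, not part of this patch; the name remove_m4_agmts is an assumption, while the lib389 calls are the ones already used above.

def remove_m4_agmts(topology, testname):
    # Delete every agreement that points to master 4, mirroring the repeated
    # try/except blocks used by each test in this module.
    for num, master in enumerate((topology.master1, topology.master2,
                                  topology.master3), start=1):
        try:
            master.agreement.delete(DEFAULT_SUFFIX, topology.master4)
        except ldap.LDAPError as e:
            log.fatal('%s: Failed to delete agmt(m%d -> m4), error: %s'
                      % (testname, num, e.message['desc']))
            assert False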
+ log.info('test_cleanallruv_stress_clean: remove all the agreements to master 4...') + try: + topology.master1.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m1 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master2.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m2 -> m4), error: ' + + e.message['desc']) + assert False + try: + topology.master3.agreement.delete(DEFAULT_SUFFIX, topology.master4) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Failed to delete agmt(m3 -> m4), error: ' + + e.message['desc']) + assert False + + # Run the task + log.info('test_cleanallruv_stress_clean: Run the cleanAllRUV task...') + try: + topology.master1.tasks.cleanAllRUV(suffix=DEFAULT_SUFFIX, replicaid='4', + args={TASK_WAIT: True}) + except ValueError as e: + log.fatal('test_cleanallruv_stress_clean: Problem running cleanAllRuv task: ' + + e.message('desc')) + assert False + + # Wait for the update to finish + log.info('test_cleanallruv_stress_clean: wait for all the updates to finish...') + m1_add_users.join() + m2_add_users.join() + m3_add_users.join() + m4_add_users.join() + + # Check the other master's RUV for 'replica 4' + log.info('test_cleanallruv_stress_clean: check if all the replicas have been cleaned...') + clean = False + count = 0 + while not clean and count < 10: + clean = True + + # Check master 1 + try: + entry = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_stress_clean: Master 1 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_stress_clean: Master 1 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Unable to search master 1 for db tombstone: ' + + e.message['desc']) + + # Check master 2 + try: + entry = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_stress_clean: Master 2 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_stress_clean: Master 2 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Unable to search master 2 for db tombstone: ' + + e.message['desc']) + + # Check master 3 + try: + entry = topology.master3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER) + if not entry: + log.error('test_cleanallruv_stress_clean: Failed to find db tombstone entry from master') + repl_fail(replica_inst) + elements = entry[0].getValues('nsds50ruv') + for ruv in elements: + if 'replica 4' in ruv: + # Not cleaned + log.error('test_cleanallruv_stress_clean: Master 3 not cleaned!') + clean = False + if clean: + log.info('test_cleanallruv_stress_clean: Master 3 is cleaned.') + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Unable to search master 3 for db tombstone: ' + + e.message['desc']) 
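The three per-master RUV checks above differ only in the instance they query; under the same assumptions (REPLICA_RUV_FILTER and the lib389 search API already used here) the check could be a single predicate. This is a sketch for illustration only; master_is_cleaned is a hypothetical name.

def master_is_cleaned(master, rid='4'):
    # Return True when the database RUV tombstone no longer references the
    # cleaned replica ID; raises ldap.LDAPError if the search itself fails.
    entries = master.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, REPLICA_RUV_FILTER)
    if not entries:
        return False
    for ruv in entries[0].getValues('nsds50ruv'):
        if 'replica %s' % rid in ruv:
            return False
    return True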
+ + # Sleep a bit and give it chance to clean up... + time.sleep(5) + count += 1 + + if not clean: + log.fatal('test_cleanallruv_stress_clean: Failed to clean replicas') + assert False + + log.info('test_cleanallruv_stress_clean: PASSED, restoring master 4...') + + # + # Cleanup - restore master 4 + # + + # Turn off readonly mode + try: + topology.master4.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-readonly', 'off')]) + except ldap.LDAPError as e: + log.fatal('test_cleanallruv_stress_clean: Failed to put master 4 into read-only mode: error ' + + e.message['desc']) + assert False + + restore_master4(topology) + + +def test_cleanallruv_final(topology): + topology.master1.delete() + topology.master2.delete() + topology.master3.delete() + topology.master4.delete() + log.info('cleanAllRUV test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + topo = topology(True) + + test_cleanallruv_init(topo) + test_cleanallruv_clean(topo) + test_cleanallruv_clean_restart(topo) + test_cleanallruv_clean_force(topo) + test_cleanallruv_abort(topo) + test_cleanallruv_abort_restart(topo) + test_cleanallruv_abort_certify(topo) + test_cleanallruv_stress_clean(topo) + test_cleanallruv_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py new file mode 100644 index 0000000..4905088 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py @@ -0,0 +1,280 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from collections import Counter + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +WAITFOR_ASYNC_ATTR = "nsDS5ReplicaWaitForAsyncResults" + +class TopologyReplication(object): + def __init__(self, master1, master2, m1_m2_agmt, m2_m1_agmt): + master1.open() + master2.open() + self.masters = ((master1, m1_m2_agmt), + (master2, m2_m1_agmt)) + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + + # Creating master 2... 
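Master 1 above and master 2 below are built from identical steps that differ only in their constants; a helper along these lines could create either one. This is a sketch under the same lib389 calls used in the fixture; create_master is a hypothetical name, not part of this patch.

def create_master(host, port, serverid, replica_id):
    # Allocate, (re)create and open one master, then enable replication on it,
    # reusing the args_instance template from the fixture above.
    args_instance[SER_HOST] = host
    args_instance[SER_PORT] = port
    args_instance[SER_SERVERID_PROP] = serverid
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    master = DirSrv(verbose=False)
    master.allocate(args_instance.copy())
    if master.exists():
        master.delete()
    master.create()
    master.open()
    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                     replicaId=replica_id)
    return master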
+ master2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + master2.agreement.init(SUFFIX, HOST_MASTER_1, PORT_MASTER_1) + master2.waitForReplInit(m2_m1_agmt) + + # Check replication is working... 
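Both agreements above are built from the same property template, so a one-direction helper could cover them. The sketch below is illustrative only; create_agmt is a hypothetical name, and the calls are the ones already used in this fixture.

def create_agmt(src, dst):
    # Build a replication agreement from src to dst with the default
    # replication bind credentials, as the fixture does for each direction.
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    agmt = src.agreement.create(suffix=SUFFIX, host=dst.host, port=dst.port,
                                properties=properties)
    if not agmt:
        log.fatal("Fail to create a master -> master replica agreement")
        sys.exit(1)
    log.debug("%s created" % agmt)
    return agmt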
+ if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + log.info("Set Replication Debugging loglevel for the errorlog") + master1.setLogLevel(lib389.LOG_REPLICA) + master2.setLogLevel(lib389.LOG_REPLICA) + + # Delete each instance in the end + def fin(): + master1.delete() + master2.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + return TopologyReplication(master1, master2, m1_m2_agmt, m2_m1_agmt) + + +@pytest.fixture(params=[(None, (4, 10)), + ('2000', (0, 1)), + ('0', (4, 10)), + ('-5', (4, 10))]) +def waitfor_async_attr(topology, request): + """Sets attribute on all replicas""" + + attr_value = request.param[0] + expected_result = request.param[1] + + # Run through all masters + for master in topology.masters: + agmt = master[1] + try: + if attr_value: + log.info("Set %s: %s on %s" % ( + WAITFOR_ASYNC_ATTR, attr_value, master[0].serverid)) + mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, attr_value)] + else: + log.info("Delete %s from %s" % ( + WAITFOR_ASYNC_ATTR, master[0].serverid)) + mod = [(ldap.MOD_DELETE, WAITFOR_ASYNC_ATTR, None)] + master[0].modify_s(agmt, mod) + except ldap.LDAPError as e: + log.error('Failed to set or delete %s attribute: (%s)' % ( + WAITFOR_ASYNC_ATTR, e.message['desc'])) + + return (attr_value, expected_result) + + +@pytest.fixture +def entries(topology, request): + """Adds entries to the master1""" + + master1 = topology.masters[0][0] + + TEST_OU = "test" + test_dn = SUFFIX + test_list = [] + + log.info("Add 100 nested entries under replicated suffix on %s" % master1.serverid) + for i in xrange(100): + test_dn = 'ou=%s%s,%s' % (TEST_OU, i, test_dn) + test_list.insert(0, test_dn) + try: + master1.add_s(Entry((test_dn, + {'objectclass': 'top', + 'objectclass': 'organizationalUnit', + 'ou': TEST_OU}))) + except ldap.LDAPError as e: + log.error('Failed to add entry (%s): error (%s)' % (test_dn, + e.message['desc'])) + assert False + + log.info("Delete created entries") + for test_dn in test_list: + try: + master1.delete_s(test_dn) + except ldap.LDAPError, e: + log.error('Failed to delete entry (%s): error (%s)' % (test_dn, + e.message['desc'])) + assert False + + def fin(): + log.info("Clear the errors log in the end of the test case") + with open(master1.errlog, 'w') as errlog: + errlog.writelines("") + request.addfinalizer(fin) + + +def test_not_int_value(topology): + """Tests not integer value""" + + master1 = topology.masters[0][0] + agmt = topology.masters[0][1] + + log.info("Try to set %s: wv1" % WAITFOR_ASYNC_ATTR) + try: + mod = [(ldap.MOD_REPLACE, WAITFOR_ASYNC_ATTR, "wv1")] + master1.modify_s(agmt, mod) + except ldap.LDAPError as e: + assert e.message['desc'] == 'Invalid syntax' + + +def test_multi_value(topology): + """Tests multi value""" + + master1 = topology.masters[0][0] + agmt = topology.masters[0][1] + log.info("agmt: %s" % agmt) + + log.info("Try to set %s: 100 and 101 in the same time (multi value test)" % ( + WAITFOR_ASYNC_ATTR)) + try: + mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "100")] + master1.modify_s(agmt, mod) + mod = [(ldap.MOD_ADD, WAITFOR_ASYNC_ATTR, "101")] + master1.modify_s(agmt, mod) + except ldap.LDAPError as e: + assert e.message['desc'] == 'Object class violation' + + +def test_value_check(topology, waitfor_async_attr): + """Checks that value has been set correctly""" + + attr_value = waitfor_async_attr[0] + + for master in topology.masters: + agmt = master[1] + + 
log.info("Check attr %s on %s" % (WAITFOR_ASYNC_ATTR, master[0].serverid)) + try: + if attr_value: + entry = master[0].search_s(agmt, ldap.SCOPE_BASE, "%s=%s" % ( + WAITFOR_ASYNC_ATTR, attr_value)) + assert entry + else: + entry = master[0].search_s(agmt, ldap.SCOPE_BASE, "%s=*" % WAITFOR_ASYNC_ATTR) + assert not entry + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +def test_behavior_with_value(topology, waitfor_async_attr, entries): + """Tests replication behavior with valid + nsDS5ReplicaWaitForAsyncResults attribute values + """ + + master1 = topology.masters[0][0] + sync_dict = Counter() + min_ap = waitfor_async_attr[1][0] + max_ap = waitfor_async_attr[1][1] + + log.info("Gather all sync attempts within Counter dict, group by timestamp") + with open(master1.errlog, 'r') as errlog: + errlog_filtered = filter(lambda x: "waitfor_async_results" in x, errlog) + for line in errlog_filtered: + # Watch only over unsuccessful sync attempts + if line.split()[4] != line.split()[5]: + timestamp = line.split(']')[0] + sync_dict[timestamp] += 1 + + log.info("Take the most common timestamp and assert it has appeared " \ + "in the range from %s to %s times" % (min_ap, max_ap)) + most_common_val = sync_dict.most_common(1)[0][1] + assert min_ap <= most_common_val <= max_ap + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py b/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py new file mode 100644 index 0000000..bd52fb6 --- /dev/null +++ b/dirsrvtests/tests/suites/replsync_plugin/repl_sync_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_repl_sync_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_repl_sync_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_repl_sync_final(topology): + topology.standalone.delete() + log.info('repl_sync test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_repl_sync_init(topo) + test_repl_sync_(topo) + test_repl_sync_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/resource_limits/res_limits_test.py b/dirsrvtests/tests/suites/resource_limits/res_limits_test.py new file mode 100644 index 0000000..672bebc --- /dev/null +++ b/dirsrvtests/tests/suites/resource_limits/res_limits_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_res_limits_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_res_limits_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_res_limits_final(topology): + topology.standalone.delete() + log.info('res_limits test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_res_limits_init(topo) + test_res_limits_(topo) + test_res_limits_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py b/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py new file mode 100644 index 0000000..2d8b61f --- /dev/null +++ b/dirsrvtests/tests/suites/retrocl_plugin/retrocl_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_retrocl_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_retrocl_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_retrocl_final(topology): + topology.standalone.delete() + log.info('retrocl test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_retrocl_init(topo) + test_retrocl_(topo) + test_retrocl_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py b/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py new file mode 100644 index 0000000..ae79bb5 --- /dev/null +++ b/dirsrvtests/tests/suites/reverpwd_plugin/reverpwd_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
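Each of these generated suite templates repeats the same standalone topology fixture. If that duplication ever becomes a burden, it could live once in a shared conftest.py; the sketch below is hypothetical (file location and fixture name are assumptions) and only reuses the steps already present in the templates.

# Hypothetical dirsrvtests/tests/suites/conftest.py
import pytest
from lib389 import DirSrv
from lib389._constants import *
from lib389.properties import *


class TopologyStandalone(object):
    def __init__(self, standalone):
        standalone.open()
        self.standalone = standalone


@pytest.fixture(scope="module")
def standalone_topology(request):
    # Recreate a clean standalone instance for the module, as each template does.
    standalone = DirSrv(verbose=False)
    args_instance[SER_HOST] = HOST_STANDALONE
    args_instance[SER_PORT] = PORT_STANDALONE
    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
    standalone.allocate(args_instance.copy())
    if standalone.exists():
        standalone.delete()
    standalone.create()
    standalone.open()
    return TopologyStandalone(standalone)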
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_reverpwd_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_reverpwd_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_reverpwd_final(topology): + topology.standalone.delete() + log.info('reverpwd test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_reverpwd_init(topo) + test_reverpwd_(topo) + test_reverpwd_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/roles_plugin/roles_test.py b/dirsrvtests/tests/suites/roles_plugin/roles_test.py new file mode 100644 index 0000000..704f2b7 --- /dev/null +++ b/dirsrvtests/tests/suites/roles_plugin/roles_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_roles_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_roles_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_roles_final(topology): + topology.standalone.delete() + log.info('roles test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_roles_init(topo) + test_roles_(topo) + test_roles_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py new file mode 100644 index 0000000..2e70656 --- /dev/null +++ b/dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py @@ -0,0 +1,778 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +PLUGIN_DN = 'cn=' + PLUGIN_ROOTDN_ACCESS + ',cn=plugins,cn=config' +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_rootdn_init(topology): + ''' + Initialize our setup to test the ROot DN Access Control Plugin + + Test the following access control type: + + - Allowed IP address * + - Denied IP address * + - Specific time window + - Days allowed access + - Allowed host * + - Denied host * + + * means mulitple valued + ''' + + log.info('Initializing root DN test suite...') + + # + # Set an aci so we can modify the plugin after we deny the Root DN + # + ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0' + + ';acl "all access";allow (all)(userdn="ldap:///anyone");)') + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_ADD, 'aci', ACI)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_init: Failed to add aci to config: error ' + + e.message['desc']) + assert False + + # + # Create a user to modify the config + # + try: + topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'uid': 'user1', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_rootdn_init: Failed to add test user ' + USER1_DN + ': error ' + + e.message['desc']) + assert False + + # + # Enable dynamic plugins + # + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError as e: + 
log.fatal('test_rootdn_init: Failed to set dynamic plugins: error ' + e.message['desc']) + assert False + + # + # Enable the plugin (aftewr enabling dynamic plugins) + # + topology.standalone.plugins.enable(PLUGIN_ROOTDN_ACCESS) + + log.info('test_rootdn_init: Initialized root DN test suite.') + + +def test_rootdn_access_specific_time(topology): + ''' + Test binding inside and outside of a specific time + ''' + + log.info('Running test_rootdn_access_specific_time...') + + # Get the current time, and bump it ahead twohours + current_hour = time.strftime("%H") + if int(current_hour) > 12: + open_time = '0200' + close_time = '0400' + else: + open_time = '1600' + close_time = '1800' + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time), + (ldap.MOD_ADD, 'rootdn-close-time', close_time)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' + + e.message['desc']) + assert False + + # + # Bind as Root DN - should fail + # + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + succeeded = True + except ldap.LDAPError as e: + succeeded = False + + if succeeded: + log.fatal('test_rootdn_access_specific_time: Root DN was incorrectly able to bind') + assert False + + # + # Set config to allow the entire day + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_specific_time: test_rootdn: failed to bind as user1') + assert False + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_specific_time: Failed to set (open) open/close times: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_specific_time: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + # + # Cleanup - undo the changes we made so the next test has a clean slate + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None), + (ldap.MOD_DELETE, 'rootdn-close-time', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_specific_time: Failed to delete open and close time: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_specific_time: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + log.info('test_rootdn_access_specific_time: PASSED') + + +def test_rootdn_access_day_of_week(topology): + ''' + Test the days of week feature + ''' + + log.info('Running test_rootdn_access_day_of_week...') + + days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat') + day = int(time.strftime("%w", time.gmtime())) + + if day > 3: + deny_days = days[0] + ', ' + days[1] + allow_days = days[day] + ',' + days[day - 1] + else: + deny_days = days[4] + ',' + days[5] + allow_days = days[day] + ',' + days[day + 1] + + # + # Set the deny days + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', + deny_days)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' + + e.message['desc']) + assert False + + # + # Bind as Root DN - should fail + # + try: + 
topology.standalone.simple_bind_s(DN_DM, PASSWORD) + succeeded = True + except ldap.LDAPError as e: + succeeded = False + + if succeeded: + log.fatal('test_rootdn_access_day_of_week: Root DN was incorrectly able to bind') + assert False + + # + # Set the allow days + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_day_of_week: : failed to bind as user1') + assert False + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', + allow_days)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_day_of_week: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + # + # Cleanup - undo the changes we made so the next test has a clean slate + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-days-allowed', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_day_of_week: Failed to set rootDN plugin config: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_day_of_week: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + log.info('test_rootdn_access_day_of_week: PASSED') + + +def test_rootdn_access_denied_ip(topology): + ''' + Test denied IP feature - we can just test denying 127.0.01 + ''' + + log.info('Running test_rootdn_access_denied_ip...') + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '127.0.0.1'), + (ldap.MOD_ADD, 'rootdn-deny-ip', '::1')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + + e.message['desc']) + assert False + + # + # Bind as Root DN - should fail + # + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + succeeded = True + except ldap.LDAPError as e: + succeeded = False + + if succeeded: + log.fatal('test_rootdn_access_denied_ip: Root DN was incorrectly able to bind') + assert False + + # + # Change the denied IP so root DN succeeds + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_ip: : failed to bind as user1') + assert False + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_ip: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + # + # Cleanup - undo the changes we made so the next test has a clean slate + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-ip', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_ip: Root DN bind failed unexpectedly failed: 
error ' + + e.message['desc']) + assert False + + log.info('test_rootdn_access_denied_ip: PASSED') + + +def test_rootdn_access_denied_host(topology): + ''' + Test denied Host feature - we can just test denying localhost + ''' + + log.info('Running test_rootdn_access_denied_host...') + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-deny-host', 'localhost.localdomain')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' + + e.message['desc']) + assert False + + # + # Bind as Root DN - should fail + # + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + succeeded = True + except ldap.LDAPError as e: + succeeded = False + + if succeeded: + log.fatal('test_rootdn_access_denied_host: Root DN was incorrectly able to bind') + assert False + + # + # Change the denied host so root DN succeeds + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_host: : failed to bind as user1') + assert False + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'i.dont.exist.com')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_host: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + # + # Cleanup - undo the changes we made so the next test has a clean slate + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-deny-host', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_host: Failed to set rootDN plugin config: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_denied_host: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + log.info('test_rootdn_access_denied_host: PASSED') + + +def test_rootdn_access_allowed_ip(topology): + ''' + Test allowed ip feature + ''' + + log.info('Running test_rootdn_access_allowed_ip...') + + # + # Set allowed host to an unknown host - blocks the Root DN + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '255.255.255.255')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' + + e.message['desc']) + assert False + + # + # Bind as Root DN - should fail + # + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + succeeded = True + except ldap.LDAPError as e: + succeeded = False + + if succeeded: + log.fatal('test_rootdn_access_allowed_ip: Root DN was incorrectly able to bind') + assert False + + # + # Allow localhost + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_ip: : failed to bind as user1') + assert False + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'), + (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + 
log.fatal('test_rootdn_access_allowed_ip: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + # + # Cleanup - undo everything we did so the next test has a clean slate + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-ip', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_ip: Failed to delete(rootdn-allow-ip): error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_ip: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + log.info('test_rootdn_access_allowed_ip: PASSED') + + +def test_rootdn_access_allowed_host(topology): + ''' + Test allowed ip feature + ''' + + log.info('Running test_rootdn_access_allowed_host...') + + # + # Set allowed host to an unknown host - blocks the Root DN + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'i.dont.exist.com')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' + + e.message['desc']) + assert False + + # + # Bind as Root DN - should fail + # + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + succeeded = True + except ldap.LDAPError as e: + succeeded = False + + if succeeded: + log.fatal('test_rootdn_access_allowed_host: Root DN was incorrectly able to bind') + assert False + + # + # Allow localhost + # + try: + topology.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_host: : failed to bind as user1') + assert False + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-allow-host', 'localhost.localdomain')]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_host: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + # + # Cleanup - undo everything we did so the next test has a clean slate + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-host', None)]) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_host: Failed to delete(rootdn-allow-host): error ' + + e.message['desc']) + assert False + + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_rootdn_access_allowed_host: Root DN bind failed unexpectedly failed: error ' + + e.message['desc']) + assert False + + log.info('test_rootdn_access_allowed_host: PASSED') + + +def test_rootdn_config_validate(topology): + ''' + Test configuration validation + + test single valued attributes: rootdn-open-time, + rootdn-close-time, + rootdn-days-allowed + + ''' + + log.info('Running test_rootdn_config_validate...') + + # + # Test rootdn-open-time + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to just add "rootdn-open-time" ') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'), + (ldap.MOD_ADD, 'rootdn-open-time', '0001')]) + log.fatal('test_rootdn_config_validate: Incorrectly 
allowed to add multiple "rootdn-open-time"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: -1"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: 2400"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: aaaaa"') + assert False + except ldap.LDAPError: + pass + + # + # Test rootdn-close-time + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add just "rootdn-close-time"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'), + (ldap.MOD_ADD, 'rootdn-close-time', '0001')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: -1"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: 2400"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: aaaaa"') + assert False + except ldap.LDAPError: + pass + + # + # Test days allowed + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'), + (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add two "rootdn-days-allowed"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Mon1"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Tue, Mon1"') + assert False + except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: 111"') + assert False + 
except ldap.LDAPError: + pass + + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-days-allowed: Gur"') + assert False + except ldap.LDAPError: + pass + + # + # Test allow ips + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-ip: 12.12.Z.12"') + assert False + except ldap.LDAPError: + pass + + # + # Test deny ips + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-ip: 12.12.Z.12"') + assert False + except ldap.LDAPError: + pass + + # + # Test allow hosts + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-allow-host: host._.com"') + assert False + except ldap.LDAPError: + pass + + # + # Test deny hosts + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')]) + log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-deny-host: host.####.com"') + assert False + except ldap.LDAPError: + pass + + log.info('test_rootdn_config_validate: PASSED') + + +def test_rootdn_final(topology): + topology.standalone.delete() + log.info('Root DN Access Control test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_rootdn_init(topo) + test_rootdn_access_specific_time(topo) + test_rootdn_access_day_of_week(topo) + test_rootdn_access_allowed_ip(topo) + test_rootdn_access_denied_ip(topo) + test_rootdn_access_allowed_host(topo) + test_rootdn_access_denied_host(topo) + test_rootdn_config_validate(topo) + + test_rootdn_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/sasl/sasl_test.py b/dirsrvtests/tests/suites/sasl/sasl_test.py new file mode 100644 index 0000000..2f5e18c --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/sasl_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
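The invalid-value checks in test_rootdn_config_validate above form a small matrix of attribute/value pairs wrapped in identical try/except blocks; they could also be driven by pytest parametrization. The sketch below is illustrative only: the test name and parameter list are assumptions, while the values and PLUGIN_DN come from the checks above.

INVALID_ROOTDN_CONFIG = [('rootdn-days-allowed', 'Mon1'),
                         ('rootdn-days-allowed', 'Gur'),
                         ('rootdn-allow-ip', '12.12.Z.12'),
                         ('rootdn-deny-ip', '12.12.Z.12'),
                         ('rootdn-allow-host', 'host._.com'),
                         ('rootdn-deny-host', 'host.####.com')]


@pytest.mark.parametrize('attr,value', INVALID_ROOTDN_CONFIG)
def test_rootdn_config_rejects_invalid(topology, attr, value):
    # The Root DN Access Control plugin must reject each invalid value.
    with pytest.raises(ldap.LDAPError):
        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, attr, value)])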
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_sasl_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_sasl_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_sasl_final(topology): + topology.standalone.delete() + log.info('sasl test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_sasl_init(topo) + test_sasl_(topo) + test_sasl_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/schema/test_schema.py b/dirsrvtests/tests/suites/schema/test_schema.py new file mode 100644 index 0000000..f23391a --- /dev/null +++ b/dirsrvtests/tests/suites/schema/test_schema.py @@ -0,0 +1,228 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 18, 2013 + +@author: rmeggins +''' +import os +import sys +import time +import ldap +import six +from ldap.cidict import cidict +from ldap.schema import SubSchema +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation_prefix = None + +attrclass = ldap.schema.models.AttributeType +occlass = ldap.schema.models.ObjectClass +syntax_len_supported = False + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a DirSrv instance for the 'module'. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + schemainst = DirSrv(verbose=False) + + # Args for the master instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + schemainst.allocate(args_instance) + + # Remove all the instance + if schemainst.exists(): + schemainst.delete() + + # Create the instance + schemainst.create() + schemainst.open() + + return TopologyStandalone(schemainst) + + +def ochasattr(subschema, oc, mustormay, attr, key): + """See if the oc and any of its parents and ancestors have the + given attr""" + rc = False + if not key in oc.__dict__: + dd = cidict() + for ii in oc.__dict__[mustormay]: + dd[ii] = ii + oc.__dict__[key] = dd + if attr in oc.__dict__[key]: + rc = True + else: + # look in parents + for noroid in oc.sup: + ocpar = subschema.get_obj(occlass, noroid) + assert(ocpar) + rc = ochasattr(subschema, ocpar, mustormay, attr, key) + if rc: + break + return rc + + +def ochasattrs(subschema, oc, mustormay, attrs): + key = mustormay + "dict" + ret = [] + for attr in attrs: + if not ochasattr(subschema, oc, mustormay, attr, key): + ret.append(attr) + return ret + + +def mycmp(v1, v2): + v1ary, v2ary = [v1], [v2] + if isinstance(v1, list) or isinstance(v1, tuple): + v1ary, v2ary = list(set([x.lower() for x in v1])), list(set([x.lower() for x in v2])) + if not len(v1ary) == len(v2ary): + return False + for v1, v2 in zip(v1ary, v2ary): + if isinstance(v1, six.string_types): + if not len(v1) == len(v2): + return False + if not v1 == v2: + return False + return True + + +def ocgetdiffs(ldschema, oc1, oc2): + fields = ['obsolete', 'names', 'desc', 'must', 'may', 'kind', 'sup'] + ret = '' + for field in fields: + v1, v2 = oc1.__dict__[field], oc2.__dict__[field] + if field == 'may' or field == 'must': + missing = ochasattrs(ldschema, oc1, field, oc2.__dict__[field]) + if missing: + ret = ret + '\t%s is missing %s\n' % (field, missing) + missing = ochasattrs(ldschema, oc2, field, oc1.__dict__[field]) + if missing: + ret = ret + '\t%s is missing %s\n' % (field, missing) + elif not mycmp(v1, v2): + ret = ret + '\t%s differs: [%s] vs. [%s]\n' % (field, oc1.__dict__[field], oc2.__dict__[field]) + return ret + + +def atgetparfield(subschema, at, field): + v = None + for nameoroid in at.sup: + atpar = subschema.get_obj(attrclass, nameoroid) + assert(atpar) + v = atpar.__dict__.get(field, atgetparfield(subschema, atpar, field)) + if v is not None: + break + return v + + +def atgetdiffs(ldschema, at1, at2): + fields = ['names', 'desc', 'obsolete', 'sup', 'equality', 'ordering', 'substr', 'syntax', + 'single_value', 'collective', 'no_user_mod', 'usage'] + if syntax_len_supported: + fields.append('syntax_len') + ret = '' + for field in fields: + v1 = at1.__dict__.get(field) or atgetparfield(ldschema, at1, field) + v2 = at2.__dict__.get(field) or atgetparfield(ldschema, at2, field) + if not mycmp(v1, v2): + ret = ret + '\t%s differs: [%s] vs. 
[%s]\n' % (field, at1.__dict__[field], at2.__dict__[field]) + return ret + + +def test_schema_comparewithfiles(topology): + '''Compare the schema from ldap cn=schema with the schema files''' + + log.info('Running test_schema_comparewithfiles...') + + retval = True + schemainst = topology.standalone + ldschema = schemainst.schema.get_subschema() + assert ldschema + for fn in schemainst.schema.list_files(): + fschema = schemainst.schema.file_to_subschema(fn) + if not fschema: + log.warn("Unable to parse %s as a schema file - skipping" % fn) + continue + assert fschema + for oid in fschema.listall(occlass): + se = fschema.get_obj(occlass, oid) + assert se + ldse = ldschema.get_obj(occlass, oid) + if not ldse: + log.error("objectclass in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) + retval = False + continue + ret = ocgetdiffs(ldschema, ldse, se) + if ret: + log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) + retval = False + for oid in fschema.listall(attrclass): + se = fschema.get_obj(attrclass, oid) + assert se + ldse = ldschema.get_obj(attrclass, oid) + if not ldse: + log.error("attributetype in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) + retval = False + continue + ret = atgetdiffs(ldschema, ldse, se) + if ret: + log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) + retval = False + assert retval + + log.info('test_schema_comparewithfiles: PASSED') + + +def test_schema_final(topology): + topology.standalone.delete() + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = os.environ.get('PREFIX') + + topo = topology(True) + + test_schema_comparewithfiles(topo) + + test_schema_final(topo) + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py b/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py new file mode 100644 index 0000000..c516745 --- /dev/null +++ b/dirsrvtests/tests/suites/schema_reload_plugin/schema_reload_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
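# test_schema_comparewithfiles above walks every objectclass and attribute
# type in both the live cn=schema entry and the on-disk schema files, diffing
# them with ocgetdiffs()/atgetdiffs(). A rough standalone sketch of how the
# live side can be fetched with plain python-ldap; the URL is an assumption,
# and the test itself goes through lib389's schema.get_subschema() and
# schema.file_to_subschema() helpers instead.
import ldap.schema
from ldap.schema.models import ObjectClass

# urlfetch() locates the subschema subentry and returns its DN plus a
# SubSchema object, the same type the comparison helpers above operate on.
subschema_dn, subschema = ldap.schema.urlfetch('ldap://localhost:389')
oc = subschema.get_obj(ObjectClass, 'person')
print(oc.names, oc.must, oc.may)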
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_schema_reload_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_schema_reload_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_schema_reload_final(topology): + topology.standalone.delete() + log.info('schema_reload test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_schema_reload_init(topo) + test_schema_reload_(topo) + test_schema_reload_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/snmp/snmp_test.py b/dirsrvtests/tests/suites/snmp/snmp_test.py new file mode 100644 index 0000000..a442efc --- /dev/null +++ b/dirsrvtests/tests/suites/snmp/snmp_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_snmp_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_snmp_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_snmp_final(topology): + topology.standalone.delete() + log.info('snmp test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_snmp_init(topo) + test_snmp_(topo) + test_snmp_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/ssl/ssl_test.py b/dirsrvtests/tests/suites/ssl/ssl_test.py new file mode 100644 index 0000000..d0b36b5 --- /dev/null +++ b/dirsrvtests/tests/suites/ssl/ssl_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ssl_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_ssl_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_ssl_final(topology): + topology.standalone.delete() + log.info('ssl test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ssl_init(topo) + test_ssl_(topo) + test_ssl_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py b/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py new file mode 100644 index 0000000..8f801ca --- /dev/null +++ b/dirsrvtests/tests/suites/syntax_plugin/syntax_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_syntax_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_syntax_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_syntax_final(topology): + topology.standalone.delete() + log.info('syntax test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_syntax_init(topo) + test_syntax_(topo) + test_syntax_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/usn_plugin/usn_test.py b/dirsrvtests/tests/suites/usn_plugin/usn_test.py new file mode 100644 index 0000000..bd57835 --- /dev/null +++ b/dirsrvtests/tests/suites/usn_plugin/usn_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
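# The topology fixture in each of these suite skeletons follows the same
# pattern: fill args_instance with the standalone SER_* constants, allocate a
# DirSrv object from a copy of those args, delete any instance already
# deployed under that server ID, create and open a fresh one, and finally
# clear the tmp dir so data left over from earlier runs cannot interfere.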
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_usn_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_usn_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_usn_final(topology): + topology.standalone.delete() + log.info('usn test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_usn_init(topo) + test_usn_(topo) + test_usn_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/views_plugin/views_test.py b/dirsrvtests/tests/suites/views_plugin/views_test.py new file mode 100644 index 0000000..28afcc8 --- /dev/null +++ b/dirsrvtests/tests/suites/views_plugin/views_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_views_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_views_(topology): + ''' + Write a single test here... 
+ ''' + + return + + +def test_views_final(topology): + topology.standalone.delete() + log.info('views test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_views_init(topo) + test_views_(topo) + test_views_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/vlv/vlv_test.py b/dirsrvtests/tests/suites/vlv/vlv_test.py new file mode 100644 index 0000000..ee8b86e --- /dev/null +++ b/dirsrvtests/tests/suites/vlv/vlv_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_vlv_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_vlv_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_vlv_final(topology): + topology.standalone.delete() + log.info('vlv test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_vlv_init(topo) + test_vlv_(topo) + test_vlv_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py b/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py new file mode 100644 index 0000000..af6f19f --- /dev/null +++ b/dirsrvtests/tests/suites/whoami_plugin/whoami_test.py @@ -0,0 +1,93 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_whoami_init(topology): + ''' + Write any test suite initialization here(if needed) + ''' + + return + + +def test_whoami_(topology): + ''' + Write a single test here... + ''' + + return + + +def test_whoami_final(topology): + topology.standalone.delete() + log.info('whoami test suite PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_whoami_init(topo) + test_whoami_(topo) + test_whoami_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/finalizer.py b/dirsrvtests/tests/tickets/finalizer.py new file mode 100644 index 0000000..bfbeadd --- /dev/null +++ b/dirsrvtests/tests/tickets/finalizer.py @@ -0,0 +1,64 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 5, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import socket +import time +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import DN_DM +from lib389.properties import * + +log = logging.getLogger(__name__) + +global installation_prefix +installation_prefix=os.getenv('PREFIX') + +def test_finalizer(): + global installation_prefix + + # for each defined instance, remove it + for args_instance in ALL_INSTANCES: + if installation_prefix: + # overwrite the environment setting + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + instance = DirSrv(verbose=True) + instance.allocate(args_instance) + if instance.exists(): + instance.delete() + + # remove any existing backup for this instance + instance.clearBackupFS() + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
+ To run isolated without py.test, you need to + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + test_finalizer() + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket365_test.py b/dirsrvtests/tests/tickets/ticket365_test.py new file mode 100644 index 0000000..44aa3e8 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket365_test.py @@ -0,0 +1,169 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket365(topology): + ''' + Write your testcase here... + + nsslapd-auditlog-logging-hide-unhashed-pw + + and test + + nsslapd-unhashed-pw-switch ticket 561 + + on, off, nolog? 
+ ''' + + USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX + + # + # Add the test entry + # + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'test_entry', + 'userpassword': 'password' + }))) + except ldap.LDAPError as e: + log.error('Failed to add test user: error ' + e.message['desc']) + assert False + + # + # Enable the audit log + # + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable audit log, error: ' + e.message['desc']) + assert False + ''' + try: + ent = topology.standalone.getEntry(DN_CONFIG, attrlist=[ + 'nsslapd-instancedir', + 'nsslapd-errorlog', + 'nsslapd-accesslog', + 'nsslapd-certdir', + 'nsslapd-schemadir']) + ''' + # + # Allow the unhashed password to be written to audit log + # + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, + 'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc']) + assert False + + # + # Set new password, and check the audit log + # + try: + topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', 'mypassword')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc']) + assert False + + # Check audit log + if not topology.standalone.searchAuditLog('unhashed#user#password: mypassword'): + log.fatal('failed to find unhashed password in auditlog') + assert False + + # + # Hide unhashed password in audit log + # + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-hide-unhashed-pw', 'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to deny writing unhashed password to audit log, error: ' + e.message['desc']) + assert False + log.info('Test complete') + + # + # Modify password, and check the audit log + # + try: + topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', 'hidepassword')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc']) + assert False + + # Check audit log + if topology.standalone.searchAuditLog('unhashed#user#password: hidepassword'): + log.fatal('Found unhashed password in auditlog') + assert False + + +def test_ticket365_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket365(topo) + test_ticket365_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47313_test.py b/dirsrvtests/tests/tickets/ticket47313_test.py new file mode 100644 index 0000000..35f2456 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47313_test.py @@ -0,0 +1,174 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import time +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +ENTRY_NAME = 'test_entry' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47313_run(topology): + """ + It adds 2 test entries + Search with filters including subtype and ! + It deletes the added entries + """ + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + # enable filter error logging + #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] + #topology.standalone.modify_s(DN_CONFIG, mod) + + topology.standalone.log.info("\n\n######################### ADD ######################\n") + + # Prepare the entry with cn;fr & cn;en + entry_name_fr = '%s fr' % (ENTRY_NAME) + entry_name_en = '%s en' % (ENTRY_NAME) + entry_name_both = '%s both' % (ENTRY_NAME) + entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) + entry_both = Entry(entry_dn_both) + entry_both.setValues('objectclass', 'top', 'person') + entry_both.setValues('sn', entry_name_both) + entry_both.setValues('cn', entry_name_both) + entry_both.setValues('cn;fr', entry_name_fr) + entry_both.setValues('cn;en', entry_name_en) + + # Prepare the entry with one member + entry_name_en_only = '%s en only' % (ENTRY_NAME) + entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) + entry_en_only = Entry(entry_dn_en_only) + entry_en_only.setValues('objectclass', 'top', 'person') + entry_en_only.setValues('sn', entry_name_en_only) + entry_en_only.setValues('cn', entry_name_en_only) + entry_en_only.setValues('cn;en', entry_name_en) + + topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) + topology.standalone.add_s(entry_both) + + topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) + topology.standalone.add_s(entry_en_only) + + topology.standalone.log.info("\n\n######################### SEARCH ######################\n") + + # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) + myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) + topology.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert 
len(ents) == 1 + assert ents[0].sn == entry_name_en_only + topology.standalone.log.info("Found %s" % ents[0].dn) + + # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) + myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) + topology.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + assert ents[0].sn == entry_name_en_only + topology.standalone.log.info("Found %s" % ents[0].dn) + + # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) + myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) + topology.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + topology.standalone.log.info("Found none") + + topology.standalone.log.info("\n\n######################### DELETE ######################\n") + + topology.standalone.log.info("Try to delete %s " % entry_dn_both) + topology.standalone.delete_s(entry_dn_both) + + topology.standalone.log.info("Try to delete %s " % entry_dn_en_only) + topology.standalone.delete_s(entry_dn_en_only) + + +def test_ticket47313_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47313_run(topo) + + test_ticket47313_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47384_test.py b/dirsrvtests/tests/tickets/ticket47384_test.py new file mode 100644 index 0000000..e5dc354 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47384_test.py @@ -0,0 +1,167 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import shutil +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
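# The ticket 47313 searches above mix plain and tagged (subtyped) attribute
# assertions inside a negated filter. A compressed python-ldap illustration
# of the two filter shapes being compared; host, credentials and suffix are
# assumptions for illustration only.
import ldap

conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# Negation on the base attribute considers every cn value, tagged or not,
# so an entry whose cn;fr matches is excluded as well.
conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
              '(&(sn=test_entry en only)(!(cn=test_entry fr)))')

# Negation on cn;fr only considers values stored under that subtype.
conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
              '(&(sn=test_entry en only)(!(cn;fr=test_entry fr)))')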
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47384(topology): + ''' + Test pluginpath validation: relative and absolute paths + + With the inclusion of ticket 47601 - we do allow plugin paths + outside the default location + ''' + PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_WHOAMI + tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) + plugin_dir = get_plugin_dir(topology.standalone.prefix) + + # Copy the library to our tmp directory + try: + shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir) + except IOError as e: + log.fatal('Failed to copy libwhoami-plugin.so to the tmp directory, error: ' + + e.strerror) + assert False + try: + shutil.copy('%s/libwhoami-plugin.la' % plugin_dir, tmp_dir) + except IOError as e: + log.fatal('Failed to copy libwhoami-plugin.la to the tmp directory, error: ' + + e.strerror) + assert False + + # + # Test adding valid plugin paths + # + # Try using the absolute path to the current library + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, + 'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)]) + except ldap.LDAPError as e: + log.error('Failed to set valid plugin path (%s): error (%s)' % + ('%s/libwhoami-plugin' % plugin_dir, e.message['desc'])) + assert False + + # Try using new remote location + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, + 'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)]) + except ldap.LDAPError as e: + log.error('Failed to set valid plugin path (%s): error (%s)' % + ('%s/libwhoami-plugin' % tmp_dir, e.message['desc'])) + assert False + + # Set plugin path back to the default + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, + 'nsslapd-pluginPath', 'libwhoami-plugin')]) + except ldap.LDAPError as e: + log.error('Failed to set valid relative plugin path (%s): error (%s)' % + ('libwhoami-plugin' % tmp_dir, e.message['desc'])) + assert False + + # + # Test invalid path (no library present) + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, + 'nsslapd-pluginPath', '/bin/libwhoami-plugin')]) + # No exception?! This is an error + log.error('Invalid plugin path was incorrectly accepted by the server!') + assert False + except ldap.UNWILLING_TO_PERFORM: + # Correct, operation should be rejected + pass + except ldap.LDAPError as e: + log.error('Failed to set invalid plugin path (%s): error (%s)' % + ('/bin/libwhoami-plugin', e.message['desc'])) + + # + # Test invalid relative path (no library present) + # + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, + 'nsslapd-pluginPath', '../libwhoami-plugin')]) + # No exception?! 
This is an error + log.error('Invalid plugin path was incorrectly accepted by the server!') + assert False + except ldap.UNWILLING_TO_PERFORM: + # Correct, operation should be rejected + pass + except ldap.LDAPError as e: + log.error('Failed to set invalid plugin path (%s): error (%s)' % + ('../libwhoami-plugin', e.message['desc'])) + + log.info('Test complete') + + +def test_ticket47384_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47384(topo) + test_ticket47384_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47431_test.py b/dirsrvtests/tests/tickets/ticket47431_test.py new file mode 100644 index 0000000..a102248 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47431_test.py @@ -0,0 +1,259 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +DN_7BITPLUGIN="cn=7-bit check,%s" % DN_PLUGIN +ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None] + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47431_0(topology): + ''' + Enable 7 bit plugin + ''' + log.info("Ticket 47431 - 0: Enable 7bit plugin...") + topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) + + +def test_ticket47431_1(topology): + ''' + nsslapd-pluginarg0: uid + nsslapd-pluginarg1: mail + nsslapd-pluginarg2: userpassword <== repeat 27 times + nsslapd-pluginarg3: , + nsslapd-pluginarg4: dc=example,dc=com + + The duplicated values are removed by str2entry_dupcheck as follows: + [..] - str2entry_dupcheck: 27 duplicate values for attribute type nsslapd-pluginarg2 + detected in entry cn=7-bit check,cn=plugins,cn=config. Extra values ignored. + ''' + + log.info("Ticket 47431 - 1: Check 26 duplicate values are treated as one...") + expected = "str2entry_dupcheck: .* duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config." 
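    # What follows: set the five plugin arguments normally, stop the server,
    # use sed to duplicate the "nsslapd-pluginarg2: userpassword" line many
    # times directly in dse.ldif, restart, and egrep the error log for the
    # str2entry_dupcheck message above to confirm the duplicates were
    # collapsed into a single value.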
+ + log.debug('modify_s %s' % DN_7BITPLUGIN) + try: + topology.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', "userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', SUFFIX)]) + except ValueError: + log.error('modify failed: Some problem occured with a value that was provided') + assert False + + arg2 = "nsslapd-pluginarg2: userpassword" + topology.standalone.stop(timeout=10) + dse_ldif = topology.standalone.confdir + '/dse.ldif' + os.system('mv %s %s.47431' % (dse_ldif, dse_ldif)) + os.system('sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % (arg2, dse_ldif, dse_ldif)) + topology.standalone.start(timeout=10) + + cmdline = 'egrep -i "%s" %s' % (expected, topology.standalone.errlog) + p = os.popen(cmdline, "r") + line = p.readline() + if line == "": + log.error('Expected error "%s" not logged in %s' % (expected, topology.standalone.errlog)) + assert False + else: + log.debug('line: %s' % line) + log.info('Expected error "%s" logged in %s' % (expected, topology.standalone.errlog)) + + + log.info("Ticket 47431 - 1: done") + + +def test_ticket47431_2(topology): + ''' + nsslapd-pluginarg0: uid + nsslapd-pluginarg0: mail + nsslapd-pluginarg1: userpassword + nsslapd-pluginarg2: , + nsslapd-pluginarg3: dc=example,dc=com + ==> + nsslapd-pluginarg0: uid + nsslapd-pluginarg1: mail + nsslapd-pluginarg2: userpassword + nsslapd-pluginarg3: , + nsslapd-pluginarg4: dc=example,dc=com + Should be logged in error log: + [..] NS7bitAttr_Init - 0: uid + [..] NS7bitAttr_Init - 1: userpassword + [..] NS7bitAttr_Init - 2: mail + [..] NS7bitAttr_Init - 3: , + [..] NS7bitAttr_Init - 4: dc=example,dc=com + ''' + + log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...") + + try: + topology.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"), + (ldap.MOD_ADD, 'nsslapd-pluginarg0', "mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', ","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', SUFFIX), + (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)]) + except ValueError: + log.error('modify failed: Some problem occured with a value that was provided') + assert False + + # PLUGIN LOG LEVEL + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) + + topology.standalone.restart(timeout=10) + + cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog) + p = os.popen(cmdline, "r") + i = 0 + while ATTRS[i]: + line = p.readline() + log.debug('line - %s' % line) + log.debug('ATTRS[%d] %s' % (i, ATTRS[i])) + if line == "": + break + elif line.find(ATTRS[i]) >= 0: + log.debug('%s was logged' % ATTRS[i]) + else: + log.error('%s was not logged.' % ATTRS[i]) + assert False + i = i + 1 + + log.info("Ticket 47431 - 2: done") + + +def test_ticket47431_3(topology): + ''' + nsslapd-pluginarg1: uid + nsslapd-pluginarg3: mail + nsslapd-pluginarg5: userpassword + nsslapd-pluginarg7: , + nsslapd-pluginarg9: dc=example,dc=com + ==> + nsslapd-pluginarg0: uid + nsslapd-pluginarg1: mail + nsslapd-pluginarg2: userpassword + nsslapd-pluginarg3: , + nsslapd-pluginarg4: dc=example,dc=com + Should be logged in error log: + [..] 
NS7bitAttr_Init - 0: uid + [..] NS7bitAttr_Init - 1: userpassword + [..] NS7bitAttr_Init - 2: mail + [..] NS7bitAttr_Init - 3: , + [..] NS7bitAttr_Init - 4: dc=example,dc=com + ''' + + log.info("Ticket 47431 - 3: Check missing args are fixed...") + + try: + topology.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "uid"), + (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', "mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', "userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', ","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', SUFFIX)]) + except ValueError: + log.error('modify failed: Some problem occured with a value that was provided') + assert False + + # PLUGIN LOG LEVEL + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) + + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47431' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=10) + + cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog) + p = os.popen(cmdline, "r") + i = 0 + while ATTRS[i]: + line = p.readline() + if line == "": + break + elif line.find(ATTRS[i]) >= 0: + log.debug('%s was logged' % ATTRS[i]) + else: + log.error('%s was not logged.' % ATTRS[i]) + assert False + i = i + 1 + + log.info("Ticket 47431 - 3: done") + log.info('Test complete') + + +def test_ticket47431_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47431_0(topo) + test_ticket47431_1(topo) + test_ticket47431_2(topo) + test_ticket47431_3(topo) + test_ticket47431_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47462_test.py b/dirsrvtests/tests/tickets/ticket47462_test.py new file mode 100644 index 0000000..c88cf43 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47462_test.py @@ -0,0 +1,365 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +DES_PLUGIN = 'cn=DES,cn=Password Storage Schemes,cn=plugins,cn=config' +AES_PLUGIN = 'cn=AES,cn=Password Storage Schemes,cn=plugins,cn=config' +MMR_PLUGIN = 'cn=Multimaster Replication Plugin,cn=plugins,cn=config' +AGMT_DN = '' +USER_DN = 'cn=test_user,' + DEFAULT_SUFFIX +USER1_DN = 'cn=test_user1,' + DEFAULT_SUFFIX +TEST_REPL_DN = 'cn=test repl,' + DEFAULT_SUFFIX + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. 
+ The replicated topology is MASTER1 <-> Master2. + ''' + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + AGMT_DN = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + master1.agreement + if not AGMT_DN: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % AGMT_DN) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(AGMT_DN) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master1.clearTmpDir(__file__) + + return TopologyMaster1Master2(master1, master2) + + +def test_ticket47462(topology): + """ + Test that AES properly replaces DES during an update/restart, and that + replication also works correctly. + """ + + # + # First set config as if it's an older version. Set DES to use libdes-plugin, + # MMR to depend on DES, delete the existing AES plugin, and set a DES password + # for the replication agreement. 
+ # + + # + # Add an extra attribute to the DES plugin args + # + try: + topology.master1.modify_s(DES_PLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable DES plugin, error: ' + e.message['desc']) + assert False + + try: + topology.master1.modify_s(DES_PLUGIN, + [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')]) + except ldap.LDAPError as e: + log.fatal('Failed to reset DES plugin, error: ' + e.message['desc']) + assert False + + try: + topology.master1.modify_s(MMR_PLUGIN, + [(ldap.MOD_DELETE, 'nsslapd-plugin-depends-on-named', 'AES')]) + + except ldap.NO_SUCH_ATTRIBUTE: + pass + except ldap.LDAPError as e: + log.fatal('Failed to reset MMR plugin, error: ' + e.message['desc']) + assert False + + # + # Delete the AES plugin + # + try: + topology.master1.delete_s(AES_PLUGIN) + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.fatal('Failed to delete AES plugin, error: ' + e.message['desc']) + assert False + + # restart the server so we must use DES plugin + topology.master1.restart(timeout=10) + + # + # Get the agmt dn, and set the password + # + try: + entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE, 'objectclass=nsDS5ReplicationAgreement') + if entry: + agmt_dn = entry[0].dn + log.info('Found agmt dn (%s)' % agmt_dn) + else: + log.fatal('No replication agreements!') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for replica credentials: ' + e.message['desc']) + assert False + + try: + properties = {RA_BINDPW: "password"} + topology.master1.agreement.setProperties(None, agmt_dn, None, properties) + log.info('Successfully modified replication agreement') + except ValueError: + log.error('Failed to update replica agreement: ' + AGMT_DN) + assert False + + # + # Check replication works with the new DES password + # + try: + topology.master1.add_s(Entry((USER1_DN, + {'objectclass': "top person".split(), + 'sn': 'sn', + 'cn': 'test_user'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology.master2.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if not ent: + log.fatal('Replication test failed fo user1!') + assert False + else: + log.info('Replication test passed') + except ldap.LDAPError as e: + log.fatal('Failed to add test user: ' + e.message['desc']) + assert False + + # + # Run the upgrade... + # + topology.master1.upgrade('online') + topology.master1.restart(timeout=10) + topology.master2.restart(timeout=10) + + # + # Check that the restart converted existing DES credentials + # + try: + entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE, 'nsDS5ReplicaCredentials=*') + if entry: + val = entry[0].getValue('nsDS5ReplicaCredentials') + if val.startswith('{AES-'): + log.info('The DES credentials have been converted to AES') + else: + log.fatal('Failed to convert credentials from DES to AES!') + assert False + else: + log.fatal('Failed to find any entries with nsDS5ReplicaCredentials ') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for replica credentials: ' + e.message['desc']) + assert False + + # + # Check that the AES plugin exists, and has all the attributes listed in DES plugin. + # The attributes might not be in the expected order so check all the attributes. 
+ # + try: + entry = topology.master1.search_s(AES_PLUGIN, ldap.SCOPE_BASE, 'objectclass=*') + if not entry[0].hasValue('nsslapd-pluginarg0', 'description') and \ + not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \ + not entry[0].hasValue('nsslapd-pluginarg2', 'description'): + log.fatal('The AES plugin did not have the DES attribute copied over correctly') + assert False + else: + log.info('The AES plugin was correctly setup') + except ldap.LDAPError as e: + log.fatal('Failed to find AES plugin: ' + e.message['desc']) + assert False + + # + # Check that the MMR plugin was updated + # + try: + entry = topology.master1.search_s(MMR_PLUGIN, ldap.SCOPE_BASE, 'objectclass=*') + if not entry[0].hasValue('nsslapd-plugin-depends-on-named', 'AES'): + log.fatal('The MMR Plugin was not correctly updated') + assert False + else: + log.info('The MMR plugin was correctly updated') + except ldap.LDAPError as e: + log.fatal('Failed to find AES plugin: ' + e.message['desc']) + assert False + + # + # Check that the DES plugin was correctly updated + # + try: + entry = topology.master1.search_s(DES_PLUGIN, ldap.SCOPE_BASE, 'objectclass=*') + if not entry[0].hasValue('nsslapd-pluginPath', 'libpbe-plugin'): + log.fatal('The DES Plugin was not correctly updated') + assert False + else: + log.info('The DES plugin was correctly updated') + except ldap.LDAPError as e: + log.fatal('Failed to find AES plugin: ' + e.message['desc']) + assert False + + # + # Check replication one last time + # + try: + topology.master1.add_s(Entry((USER_DN, + {'objectclass': "top person".split(), + 'sn': 'sn', + 'cn': 'test_user'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology.master2.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if not ent: + log.fatal('Replication test failed!') + assert False + else: + log.info('Replication test passed') + except ldap.LDAPError as e: + log.fatal('Failed to add test user: ' + e.message['desc']) + assert False + + +def test_ticket47462_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + test_ticket47462(topo) + test_ticket47462_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47490_test.py b/dirsrvtests/tests/tickets/ticket47490_test.py new file mode 100644 index 0000000..b61d443 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47490_test.py @@ -0,0 +1,691 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import ldap +import socket +import time +import logging +import pytest +import re +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX +MUST_OLD = "(postalAddress $ preferredLocale)" +MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)" +MAY_OLD = "(postalCode $ street)" +MAY_NEW = "(postalCode $ street $ postOfficeBox)" + + +class TopologyMasterConsumer(object): + def __init__(self, master, consumer): + master.open() + self.master = master + + consumer.open() + self.consumer = consumer + + +def _header(topology, label): + topology.master.log.info("\n\n###############################################") + topology.master.log.info("#######") + topology.master.log.info("####### %s" % label) + topology.master.log.info("#######") + topology.master.log.info("###################################################") + + +def pattern_errorlog(file, log_pattern): + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST_OLD + if not may: + may = MAY_OLD + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +def add_OC(instance, oid_ext, name): + new_oc = _oc_definition(oid_ext, name) + instance.schema.add_schema('objectClasses', new_oc) + + +def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): + old_oc = _oc_definition(oid_ext, name, old_must, old_may) + new_oc = _oc_definition(oid_ext, name, new_must, new_may) + instance.schema.del_schema('objectClasses', old_oc) + instance.schema.add_schema('objectClasses', new_oc) + + +def support_schema_learning(topology): + """ + with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn + schema definitions when a replication occurs. 
+ Before that ticket: replication of the schema fails requiring administrative operation + In the test the schemaCSN (master consumer) differs + + After that ticket: replication of the schema succeeds (after an initial phase of learning) + In the test the schema CSN (master consumer) are in sync + + This function returns True if 47721 is fixed in the current release + False else + """ + ent = topology.consumer.getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring']) + if ent.hasAttr('nsslapd-versionstring'): + val = ent.getValue('nsslapd-versionstring') + version = val.split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] + major = int(version[0]) + minor = int(version[1]) + if major > 1: + return True + if minor > 3: + # version is 1.4 or after + return True + if minor == 3: + if version[2].isdigit(): + if int(version[2]) >= 3: + return True + return False + + +def trigger_update(topology): + """ + It triggers an update on the supplier. This will start a replication + session and a schema push + """ + try: + trigger_update.value += 1 + except AttributeError: + trigger_update.value = 1 + replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_update.value))] + topology.master.modify_s(ENTRY_DN, replace) + + # wait 10 seconds that the update is replicated + loop = 0 + while loop <= 10: + try: + ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) + val = ent.telephonenumber or "0" + if int(val) == trigger_update.value: + return + # the expected value is not yet replicated. try again + time.sleep(1) + loop += 1 + log.debug("trigger_update: receive %s (expected %d)" % (val, trigger_update.value)) + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + +def trigger_schema_push(topology): + ''' + Trigger update to create a replication session. + In case of 47721 is fixed and the replica needs to learn the missing definition, then + the first replication session learn the definition and the second replication session + push the schema (and the schemaCSN. + This is why there is two updates and replica agreement is stopped/start (to create a second session) + ''' + agreements = topology.master.agreement.list(suffix=SUFFIX, consumer_host=topology.consumer.host, consumer_port=topology.consumer.port) + assert(len(agreements) == 1) + ra = agreements[0] + trigger_update(topology) + topology.master.agreement.pause(ra.dn) + topology.master.agreement.resume(ra.dn) + trigger_update(topology) + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER -> Consumer. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + master = DirSrv(verbose=False) + consumer = DirSrv(verbose=False) + + # Args for the master instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master.allocate(args_master) + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_consumer = args_instance.copy() + consumer.allocate(args_consumer) + + # Get the status of the instance + instance_master = master.exists() + instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() + if instance_consumer: + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master.testReplication(DEFAULT_SUFFIX, consumer): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master.clearTmpDir(__file__) + + # + # Here we have two instances master and consumer + # with replication working. 
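+    # Illustrative usage only (not part of the fixture): each test in this
+    # module receives this object via its 'topology' argument, e.g.
+    #
+    #   def test_example(topology):
+    #       assert topology.master.schema.get_schema_csn() is not None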
+ return TopologyMasterConsumer(master, consumer) + + +def test_ticket47490_init(topology): + """ + Initialize the test environment + """ + log.debug("test_ticket47490_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology.master.errorlog_file = open(topology.master.errlog, "r") + + # This entry will be used to trigger attempt of schema push + topology.master.add_s(Entry((ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_entry', + 'cn': 'test_entry'}))) + + +def test_ticket47490_one(topology): + """ + Summary: Extra OC Schema is pushed - no error + + If supplier schema is a superset (one extra OC) of consumer schema, then + schema is pushed and there is no message in the error log + State at startup: + - supplier default schema + - consumer default schema + Final state + - supplier +masterNewOCA + - consumer +masterNewOCA + + """ + _header(topology, "Extra OC Schema is pushed - no error") + + log.debug("test_ticket47490_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) + # update the schema of the supplier so that it is a superset of + # consumer. Schema should be pushed + add_OC(topology.master, 2, 'masterNewOCA') + + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_ticket47490_one master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47490_one onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + if res is not None: + assert False + + +def test_ticket47490_two(topology): + """ + Summary: Extra OC Schema is pushed - (ticket 47721 allows to learn missing def) + + If consumer schema is a superset (one extra OC) of supplier schema, then + schema is pushed and there is a message in the error log + State at startup + - supplier +masterNewOCA + - consumer +masterNewOCA + Final state + - supplier +masterNewOCA +masterNewOCB + - consumer +masterNewOCA +consumerNewOCA + + """ + + _header(topology, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)") + + # add this OC on consumer. 
Supplier will not push the schema
+    add_OC(topology.consumer, 1, 'consumerNewOCA')
+
+    # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
+    time.sleep(2)
+    add_OC(topology.master, 3, 'masterNewOCB')
+
+    # now push the schema
+    trigger_schema_push(topology)
+    master_schema_csn = topology.master.schema.get_schema_csn()
+    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+
+    # Check the schemaCSN was NOT updated on the consumer
+    # with 47721, supplier learns the missing definition
+    log.debug("test_ticket47490_two master_schema_csn=%s", master_schema_csn)
+    log.debug("test_ticket47490_two consumer_schema_csn=%s", consumer_schema_csn)
+    if support_schema_learning(topology):
+        assert master_schema_csn == consumer_schema_csn
+    else:
+        assert master_schema_csn != consumer_schema_csn
+
+    # Check the error log of the supplier does not contain an error
+    # This message may happen during the learning phase
+    regex = re.compile("must not be overwritten \(set replication log for additional info\)")
+    res = pattern_errorlog(topology.master.errorlog_file, regex)
+
+
+def test_ticket47490_three(topology):
+    """
+    Summary: Extra OC Schema is pushed - no error
+
+    If supplier schema is again a superset (one extra OC), then
+    schema is pushed and there is no message in the error log
+    State at startup
+        - supplier +masterNewOCA +masterNewOCB
+        - consumer +masterNewOCA +consumerNewOCA
+    Final state
+        - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
+        - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
+
+    """
+    _header(topology, "Extra OC Schema is pushed - no error")
+
+    # Do an update to trigger the schema push attempt
+    # add consumerNewOCA on the supplier as well, so the supplier is again a superset
+    add_OC(topology.master, 1, 'consumerNewOCA')
+
+    # now push the schema
+    trigger_schema_push(topology)
+    master_schema_csn = topology.master.schema.get_schema_csn()
+    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+
+    # Check the schemaCSN was updated on the consumer
+    log.debug("test_ticket47490_three master_schema_csn=%s", master_schema_csn)
+    log.debug("test_ticket47490_three consumer_schema_csn=%s", consumer_schema_csn)
+    assert master_schema_csn == consumer_schema_csn
+
+    # Check the error log of the supplier does not contain an error
+    regex = re.compile("must not be overwritten \(set replication log for additional info\)")
+    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    if res is not None:
+        assert False
+
+
+def test_ticket47490_four(topology):
+    """
+    Summary: Same OC - extra MUST: Schema is pushed - no error
+
+    If supplier schema is again a superset (OC with more MUST), then
+    schema is pushed and there is no message in the error log
+    State at startup
+        - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
+        - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
+    Final state
+        - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
+                   +must=telexnumber
+        - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
+                   +must=telexnumber
+
+    """
+    _header(topology, "Same OC - extra MUST: Schema is pushed - no error")
+
+    mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+
+    trigger_schema_push(topology)
+    master_schema_csn = topology.master.schema.get_schema_csn()
+    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+
+    # Check the schemaCSN was updated on the consumer
+    log.debug("test_ticket47490_four master_schema_csn=%s", master_schema_csn)
+
log.debug("ctest_ticket47490_four onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + if res is not None: + assert False + + +def test_ticket47490_five(topology): + """ + Summary: Same OC - extra MUST: Schema is pushed - (fix for 47721) + + If consumer schema is a superset (OC with more MUST), then + schema is pushed (fix for 47721) and there is a message in the error log + State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber + Final state + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber +must=telexnumber + + Note: replication log is enabled to get more details + """ + _header(topology, "Same OC - extra MUST: Schema is pushed - (fix for 47721)") + + # get more detail why it fails + topology.master.enableReplLogging() + + # add telenumber to 'consumerNewOCA' on the consumer + mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + time.sleep(2) + add_OC(topology.master, 4, 'masterNewOCC') + + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + # with 47721, supplier learns the missing definition + log.debug("test_ticket47490_five master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47490_five onsumer_schema_csn=%s", consumer_schema_csn) + if support_schema_learning(topology): + assert master_schema_csn == consumer_schema_csn + else: + assert master_schema_csn != consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + + +def test_ticket47490_six(topology): + """ + Summary: Same OC - extra MUST: Schema is pushed - no error + + If supplier schema is again a superset (OC with more MUST), then + schema is pushed and there is no message in the error log + State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber +must=telexnumber + Final state + + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + + Note: replication log is enabled to get more details + """ + _header(topology, "Same OC - extra MUST: Schema is pushed - no error") + + # add telenumber to 'consumerNewOCA' on the consumer + mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) + + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = 
topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_ticket47490_six master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47490_six onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + if res is not None: + assert False + + +def test_ticket47490_seven(topology): + """ + Summary: Same OC - extra MAY: Schema is pushed - no error + + If supplier schema is again a superset (OC with more MAY), then + schema is pushed and there is no message in the error log + State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + Final stat + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + """ + _header(topology, "Same OC - extra MAY: Schema is pushed - no error") + + mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) + + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_ticket47490_seven master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47490_seven consumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + if res is not None: + assert False + + +def test_ticket47490_eight(topology): + """ + Summary: Same OC - extra MAY: Schema is pushed (fix for 47721) + + If consumer schema is a superset (OC with more MAY), then + schema is pushed (fix for 47721) and there is message in the error log + State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + Final state + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + """ + _header(topology, "Same OC - extra MAY: Schema is pushed (fix for 47721)") + + mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) + + # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + time.sleep(2) + mod_OC(topology.master, 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW) + + 
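+    # At this point the consumer's consumerNewOCA already carries the extra
+    # MAY attribute, while masterNewOCC was modified on the supplier two
+    # seconds later, so the supplier's nsSchemaCSN is the newer one; the push
+    # below therefore exercises the 47721 learning path.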
trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was not updated on the consumer + # with 47721, supplier learns the missing definition + log.debug("test_ticket47490_eight master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47490_eight onsumer_schema_csn=%s", consumer_schema_csn) + if support_schema_learning(topology): + assert master_schema_csn == consumer_schema_csn + else: + assert master_schema_csn != consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + + +def test_ticket47490_nine(topology): + """ + Summary: Same OC - extra MAY: Schema is pushed - no error + + If consumer schema is a superset (OC with more MAY), then + schema is not pushed and there is message in the error log + State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + + Final state + + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox + """ + _header(topology, "Same OC - extra MAY: Schema is pushed - no error") + + mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) + + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_ticket47490_nine master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47490_nine onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + if res is not None: + assert False + + +def test_ticket47490_final(topology): + topology.master.delete() + topology.consumer.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47490_init(topo) + test_ticket47490_one(topo) + test_ticket47490_two(topo) + test_ticket47490_three(topo) + test_ticket47490_four(topo) + test_ticket47490_five(topo) + test_ticket47490_six(topo) + test_ticket47490_seven(topo) + test_ticket47490_eight(topo) + test_ticket47490_nine(topo) + + test_ticket47490_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47553_test.py b/dirsrvtests/tests/tickets/ticket47553_test.py new file mode 100644 index 0000000..84d462d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47553_test.py @@ -0,0 +1,166 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +CONTAINER_1_OU = 'test_ou_1' +CONTAINER_2_OU = 'test_ou_2' +CONTAINER_1 = 'ou=%s,dc=example,dc=com' % CONTAINER_1_OU +CONTAINER_2 = 'ou=%s,dc=example,dc=com' % CONTAINER_2_OU +USER_CN = 'test_user' +USER_PWD = 'Secret123' +USER = 'cn=%s,%s' % (USER_CN, CONTAINER_1) + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
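+    # Note: unlike the replicated fixtures in the other ticket tests, this
+    # fixture registers a pytest finalizer below, so the standalone instance
+    # is deleted automatically when the module's tests complete.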
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +@pytest.fixture(scope="module") +def env_setup(topology): + """Adds two containers, one user and two ACI rules""" + + try: + log.info("Add a container: %s" % CONTAINER_1) + topology.standalone.add_s(Entry((CONTAINER_1, + {'objectclass': 'top', + 'objectclass': 'organizationalunit', + 'ou': CONTAINER_1_OU, + }))) + + log.info("Add a container: %s" % CONTAINER_2) + topology.standalone.add_s(Entry((CONTAINER_2, + {'objectclass': 'top', + 'objectclass': 'organizationalunit', + 'ou': CONTAINER_2_OU, + }))) + + log.info("Add a user: %s" % USER) + topology.standalone.add_s(Entry((USER, + {'objectclass': 'top person'.split(), + 'cn': USER_CN, + 'sn': USER_CN, + 'userpassword': USER_PWD + }))) + except ldap.LDAPError as e: + log.error('Failed to add object to database: %s' % e.message['desc']) + assert False + + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER + ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + + try: + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_1)) + topology.standalone.modify_s(CONTAINER_1, mod) + + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_2)) + topology.standalone.modify_s(CONTAINER_2, mod) + except ldap.LDAPError as e: + log.fatal('Failed to add ACI: error (%s)' % (e.message['desc'])) + assert False + + +def test_ticket47553(topology, env_setup): + """Tests, that MODRDN operation is allowed, + if user has ACI right '(all)' under superior entries, + but doesn't have '(modrdn)' + """ + + log.info("Bind as %s" % USER) + try: + topology.standalone.simple_bind_s(USER, USER_PWD) + except ldap.LDAPError as e: + log.error('Bind failed for %s, error %s' % (USER, e.message['desc'])) + assert False + + log.info("User MODRDN operation from %s to %s" % (CONTAINER_1, + CONTAINER_2)) + try: + topology.standalone.rename_s(USER, "cn=%s" % USER_CN, + newsuperior=CONTAINER_2, delold=1) + except ldap.LDAPError as e: + log.error('MODRDN failed for %s, error %s' % (USER, e.message['desc'])) + assert False + + try: + log.info("Check there is no user in %s" % CONTAINER_1) + entries = topology.standalone.search_s(CONTAINER_1, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert not entries + + log.info("Check there is our user in %s" % CONTAINER_2) + entries = topology.standalone.search_s(CONTAINER_2, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert entries + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + # -v for additional verbose + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47560_test.py b/dirsrvtests/tests/tickets/ticket47560_test.py 
new file mode 100644 index 0000000..da86217 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47560_test.py @@ -0,0 +1,253 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47560(topology): + """ + This test case does the following: + SETUP + - Create entry cn=group,SUFFIX + - Create entry cn=member,SUFFIX + - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" + - Enable Memberof Plugins + + # Here the cn=member entry has a 'memberOf' but + # cn=group entry does not contain 'cn=member' in its member + + TEST CASE + - start the fixupmemberof task + - read the cn=member entry + - check 'memberOf is now empty + + TEARDOWN + - Delete entry cn=group,SUFFIX + - Delete entry cn=member,SUFFIX + - Disable Memberof Plugins + """ + + def _enable_disable_mbo(value): + """ + Enable or disable mbo plugin depending on 'value' ('on'/'off') + """ + # enable/disable the mbo plugin + if value == 'on': + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + else: + topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + + log.debug("-------------> _enable_disable_mbo(%s)" % value) + + topology.standalone.stop(timeout=120) + time.sleep(1) + topology.standalone.start(timeout=120) + time.sleep(3) + + # need to reopen a connection toward the instance + topology.standalone.open() + + def _test_ticket47560_setup(): + """ + - Create entry cn=group,SUFFIX + - Create entry cn=member,SUFFIX + - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" + - Enable Memberof Plugins + """ + log.debug("-------- > _test_ticket47560_setup\n") + + # + # By default the memberof plugin is disabled create + # - create a group entry + # - create a member entry + # - set the member entry as memberof the group entry + # + entry = Entry(group_DN) + entry.setValues('objectclass', 'top', 'groupOfNames', 'inetUser') + entry.setValues('cn', 'group') + try: + topology.standalone.add_s(entry) + except ldap.ALREADY_EXISTS: + log.debug("Entry %s 
already exists" % (group_DN)) + + entry = Entry(member_DN) + entry.setValues('objectclass', 'top', 'person', 'organizationalPerson', 'inetorgperson', 'inetUser') + entry.setValues('uid', 'member') + entry.setValues('cn', 'member') + entry.setValues('sn', 'member') + try: + topology.standalone.add_s(entry) + except ldap.ALREADY_EXISTS: + log.debug("Entry %s already exists" % (member_DN)) + + replace = [(ldap.MOD_REPLACE, 'memberof', group_DN)] + topology.standalone.modify_s(member_DN, replace) + + # + # enable the memberof plugin and restart the instance + # + _enable_disable_mbo('on') + + # + # check memberof attribute is still present + # + filt = 'uid=member' + ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + #print ent + value = ent.getValue('memberof') + #print "memberof: %s" % (value) + assert value == group_DN + + def _test_ticket47560_teardown(): + """ + - Delete entry cn=group,SUFFIX + - Delete entry cn=member,SUFFIX + - Disable Memberof Plugins + """ + log.debug("-------- > _test_ticket47560_teardown\n") + # remove the entries group_DN and member_DN + try: + topology.standalone.delete_s(group_DN) + except: + log.warning("Entry %s fail to delete" % (group_DN)) + try: + topology.standalone.delete_s(member_DN) + except: + log.warning("Entry %s fail to delete" % (member_DN)) + # + # disable the memberof plugin and restart the instance + # + _enable_disable_mbo('off') + + group_DN = "cn=group,%s" % (SUFFIX) + member_DN = "uid=member,%s" % (SUFFIX) + + # + # Initialize the test case + # + _test_ticket47560_setup() + + # + # start the test + # - start the fixup task + # - check the entry is fixed (no longer memberof the group) + # + log.debug("-------- > Start ticket tests\n") + + filt = 'uid=member' + ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + log.debug("Unfixed entry %r\n" % ent) + + # run the fixup task + topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) + + ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + log.debug("Fixed entry %r\n" % ent) + + if ent.getValue('memberof') == group_DN: + log.warning("Error the fixupMemberOf did not fix %s" % (member_DN)) + result_successful = False + else: + result_successful = True + + # + # cleanup up the test case + # + _test_ticket47560_teardown() + + assert result_successful is True + + +def test_ticket47560_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47560(topo) + test_ticket47560_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47573_test.py b/dirsrvtests/tests/tickets/ticket47573_test.py new file mode 100644 index 0000000..8edf113 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47573_test.py @@ -0,0 +1,347 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +import re +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)" +MAY_OLD = "(postalCode $ street)" + +MUST_NEW = "(postalAddress $ preferredLocale)" +MAY_NEW = "(telexNumber $ postalCode $ street)" + + +class TopologyMasterConsumer(object): + def __init__(self, master, consumer): + master.open() + self.master = master + + consumer.open() + self.consumer = consumer + + +def pattern_errorlog(file, log_pattern): + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47573' + sup = 'person' + if not must: + must = MUST_OLD + if not may: + may = MAY_OLD + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +def add_OC(instance, oid_ext, name): + new_oc = _oc_definition(oid_ext, name) + instance.schema.add_schema('objectClasses', new_oc) + + +def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): + old_oc = _oc_definition(oid_ext, name, old_must, old_may) + new_oc = _oc_definition(oid_ext, name, new_must, new_may) + instance.schema.del_schema('objectClasses', old_oc) + instance.schema.add_schema('objectClasses', new_oc) + + +def trigger_schema_push(topology): + """ + It triggers an update on the supplier. This will start a replication + session and a schema push + """ + try: + trigger_schema_push.value += 1 + except AttributeError: + trigger_schema_push.value = 1 + replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))] + topology.master.modify_s(ENTRY_DN, replace) + + # wait 10 seconds that the update is replicated + loop = 0 + while loop <= 10: + try: + ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) + val = ent.telephonenumber or "0" + if int(val) == trigger_schema_push.value: + return + # the expected value is not yet replicated. try again + time.sleep(1) + loop += 1 + log.debug("trigger_schema_push: receive %s (expected %d)" % (val, trigger_schema_push.value)) + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER -> Consumer. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + master = DirSrv(verbose=False) + consumer = DirSrv(verbose=False) + + # Args for the master instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master.allocate(args_master) + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_consumer = args_instance.copy() + consumer.allocate(args_consumer) + + # Get the status of the instance + instance_master = master.exists() + instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() + if instance_consumer: + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master.testReplication(DEFAULT_SUFFIX, consumer): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master.clearTmpDir(__file__) + + # Here we have two instances master and consumer + # with replication working. + return TopologyMasterConsumer(master, consumer) + + +def test_ticket47573_init(topology): + """ + Initialize the test environment + """ + log.debug("test_ticket47573_init topology %r (master %r, consumer %r" % + (topology, topology.master, topology.consumer)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology.master.errorlog_file = open(topology.master.errlog, "r") + + # This entry will be used to trigger attempt of schema push + topology.master.add_s(Entry((ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_entry', + 'cn': 'test_entry'}))) + + +def test_ticket47573_one(topology): + """ + Summary: Add a custom OC with MUST and MAY + MUST = postalAddress $ preferredLocale + MAY = telexNumber $ postalCode $ street + + Final state + - supplier +OCwithMayAttr + - consumer +OCwithMayAttr + + """ + log.debug("test_ticket47573_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) + # update the schema of the supplier so that it is a superset of + # consumer. 
Schema should be pushed + new_oc = _oc_definition(2, 'OCwithMayAttr', + must = MUST_OLD, + may = MAY_OLD) + topology.master.schema.add_schema('objectClasses', new_oc) + + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_ticket47573_one master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47573_one onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + assert res is None + + +def test_ticket47573_two(topology): + """ + Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute + + + Final state + - supplier OCwithMayAttr updated + - consumer OCwithMayAttr updated + + """ + + # Update the objectclass so that a MAY attribute is moved to MUST attribute + mod_OC(topology.master, 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) + + # now push the scheam + trigger_schema_push(topology) + master_schema_csn = topology.master.schema.get_schema_csn() + consumer_schema_csn = topology.consumer.schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_ticket47573_two master_schema_csn=%s", master_schema_csn) + log.debug("test_ticket47573_two consumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology.master.errorlog_file, regex) + assert res is None + + +def test_ticket47573_three(topology): + ''' + Create a entry with OCwithMayAttr OC + ''' + # Check replication is working fine + dn = "cn=ticket47573, %s" % SUFFIX + topology.master.add_s(Entry((dn, + {'objectclass': "top person OCwithMayAttr".split(), + 'sn': 'test_repl', + 'cn': 'test_repl', + 'postalAddress': 'here', + 'preferredLocale': 'en', + 'telexNumber': '12$us$21', + 'postalCode': '54321'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology.consumer.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + +def test_ticket47573_final(topology): + topology.master.delete() + topology.consumer.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47573_init(topo) + test_ticket47573_one(topo) + test_ticket47573_two(topo) + test_ticket47573_three(topo) + + test_ticket47573_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47619_test.py b/dirsrvtests/tests/tickets/ticket47619_test.py new file mode 100644 index 0000000..0b9961e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47619_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 100 + +ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] + + +class TopologyMasterConsumer(object): + def __init__(self, master, consumer): + master.open() + self.master = master + + consumer.open() + self.consumer = consumer + + def __repr__(self): + return "Master[%s] -> Consumer[%s" % (self.master, self.consumer) + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER -> Consumer. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + master = DirSrv(verbose=False) + consumer = DirSrv(verbose=False) + + # Args for the master instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master.allocate(args_master) + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_consumer = args_instance.copy() + consumer.allocate(args_consumer) + + # Get the status of the instance + instance_master = master.exists() + instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() + if instance_consumer: + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master.testReplication(DEFAULT_SUFFIX, consumer): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master.clearTmpDir(__file__) + + # Here we have two instances master and consumer + # with replication working. 
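+    # The tests in this module enable the retro changelog on the master, add
+    # MAX_OTHERS dummy entries, then create and rebuild 'eq' indexes for the
+    # attributes listed in ATTRIBUTES under RETROCL_SUFFIX.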
+ return TopologyMasterConsumer(master, consumer) + + +def test_ticket47619_init(topology): + """ + Initialize the test environment + """ + topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF) + #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology.master.stop(timeout=10) + topology.master.start(timeout=10) + + topology.master.log.info("test_ticket47619_init topology %r" % (topology)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology.master.errorlog_file = open(topology.master.errlog, "r") + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology.master.log.info("test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1)) + + # Check the number of entries in the retro changelog + time.sleep(2) + ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) == MAX_OTHERS + + +def test_ticket47619_create_index(topology): + args = {INDEX_TYPE: 'eq'} + for attr in ATTRIBUTES: + topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) + + +def test_ticket47619_reindex(topology): + ''' + Reindex all the attributes in ATTRIBUTES + ''' + args = {TASK_WAIT: True} + for attr in ATTRIBUTES: + rc = topology.master.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args) + assert rc == 0 + + +def test_ticket47619_check_indexed_search(topology): + for attr in ATTRIBUTES: + ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr) + assert len(ents) == 0 + + +def test_ticket47619_final(topology): + topology.master.delete() + topology.consumer.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47619_init(topo) + + test_ticket47619_create_index(topo) + + # important restart that trigger the hang + # at restart, finding the new 'changelog' backend, the backend is acquired in Read + # preventing the reindex task to complete + topo.master.restart(timeout=10) + test_ticket47619_reindex(topo) + test_ticket47619_check_indexed_search(topo) + + test_ticket47619_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47640_test.py b/dirsrvtests/tests/tickets/ticket47640_test.py new file mode 100644 index 0000000..cd450ab --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47640_test.py @@ -0,0 +1,130 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating standalone instance ...
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_ticket47640(topology):
+    '''
+    Linked Attrs Plugins - verify that if the plugin fails to update the link entry
+    that the entire operation is aborted
+    '''
+
+    # Enable Dynamic plugins, and the linked Attrs plugin
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+    except ValueError as e:
+        log.fatal('Failed to enable linked attributes plugin!'
+ e.message['desc']) + assert False + + # Add the plugin config entry + try: + topology.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': 'seeAlso', + 'managedType': 'seeAlso' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + assert False + + # Add an entry who has a link to an entry that does not exist + OP_REJECTED = False + try: + topology.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager', + 'seeAlso': 'uid=user,dc=example,dc=com' + }))) + except ldap.UNWILLING_TO_PERFORM: + # Success + log.info('Add operation correctly rejected.') + OP_REJECTED = True + except ldap.LDAPError as e: + log.fatal('Add operation incorrectly rejected: error %s - ' + + 'expected "unwilling to perform"' % e.message['desc']) + assert False + if not OP_REJECTED: + log.fatal('Add operation incorrectly allowed') + assert False + + log.info('Test complete') + + +def test_ticket47640_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47640(topo) + test_ticket47640_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47653MMR_test.py b/dirsrvtests/tests/tickets/ticket47653MMR_test.py new file mode 100644 index 0000000..f951e55 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47653MMR_test.py @@ -0,0 +1,473 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47653' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. 
+ ''' + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master1.clearTmpDir(__file__) + + # Here we have two instances master and consumer + # with replication working. 
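# Illustrative aside: the AUXILIARY objectclass built by _oc_definition() above reaches
# the server through schema.add_schema() in the tests below; a rough plain-LDAP
# equivalent (an assumption about what that helper does) is a MOD_ADD on cn=schema.
new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY)
master1.modify_s('cn=schema', [(ldap.MOD_ADD, 'objectClasses', new_oc)])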
+ return TopologyMaster1Master2(master1, master2) + + +def test_ticket47653_init(topology): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) + topology.master1.schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology.master1.log.info("Add %s" % BIND_DN) + topology.master1.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + # get read of anonymous ACI for use 'read-search' aci in SEARCH test + ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" + mod = [(ldap.MOD_DELETE, 'aci', ACI_ANONYMOUS)] + topology.master1.modify_s(SUFFIX, mod) + topology.master2.modify_s(SUFFIX, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47653_add(topology): + ''' + This test ADD an entry on MASTER1 where 47653 is fixed. Then it checks that entry is replicated + on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 and check the update on MASTER1 + + It checks that, bound as bind_entry, + - we can not ADD an entry without the proper SELFDN aci. 
+ - with the proper ACI we can not ADD with 'member' attribute + - with the proper ACI and 'member' it succeeds to ADD + ''' + topology.master1.log.info("\n\n######################### ADD ######################\n") + + # bind as bind_entry + topology.master1.log.info("Bind as %s" % BIND_DN) + topology.master1.simple_bind_s(BIND_DN, BIND_PW) + + # Prepare the entry with multivalued members + entry_with_members = Entry(ENTRY_DN) + entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_members.setValues('sn', ENTRY_NAME) + entry_with_members.setValues('cn', ENTRY_NAME) + entry_with_members.setValues('postalAddress', 'here') + entry_with_members.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry_with_members.setValues('member', members) + + # Prepare the entry with only one member value + entry_with_member = Entry(ENTRY_DN) + entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_member.setValues('sn', ENTRY_NAME) + entry_with_member.setValues('cn', ENTRY_NAME) + entry_with_member.setValues('postalAddress', 'here') + entry_with_member.setValues('postalCode', '1234') + member = [] + member.append(BIND_DN) + entry_with_member.setValues('member', member) + + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.master1.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) + + topology.master1.add_s(entry_with_member) + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology.master1.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + time.sleep(1) + + # bind as bind_entry + topology.master1.log.info("Bind as %s" % BIND_DN) + topology.master1.simple_bind_s(BIND_DN, BIND_PW) + + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.master1.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) + topology.master1.add_s(Entry((ENTRY_DN, { + 'objectclass': ENTRY_OC.split(), + 'sn': ENTRY_NAME, + 'cn': ENTRY_NAME, + 'postalAddress': 'here', + 'postalCode': '1234'}))) + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + # member should contain only one value + try: + topology.master1.log.info("Try to add Add %s (with several member values)" % ENTRY_DN) + topology.master1.add_s(entry_with_members) + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN) + try: + topology.master1.add_s(entry_with_member) + except ldap.LDAPError as e: + 
topology.master1.log.info("Failed to add entry, error: " + e.message['desc']) + assert False + + # + # Now check the entry as been replicated + # + topology.master2.simple_bind_s(DN_DM, PASSWORD) + topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + + # Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2) + topology.master1.log.info("Update %s on M2" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', 'test_add')] + topology.master2.modify_s(ENTRY_DN, mod) + + topology.master1.simple_bind_s(DN_DM, PASSWORD) + loop = 0 + while loop <= 10: + try: + ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + assert ent.getValue('description') == 'test_add' + + +def test_ticket47653_modify(topology): + ''' + This test MOD an entry on MASTER1 where 47653 is fixed. Then it checks that update is replicated + on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 (bound as BIND_DN). + This update may fail whether or not 47653 is fixed on MASTER2 + + It checks that, bound as bind_entry, + - we can not modify an entry without the proper SELFDN aci. + - adding the ACI, we can modify the entry + ''' + # bind as bind_entry + topology.master1.log.info("Bind as %s" % BIND_DN) + topology.master1.simple_bind_s(BIND_DN, BIND_PW) + + topology.master1.log.info("\n\n######################### MODIFY ######################\n") + + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.master1.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')] + topology.master1.modify_s(ENTRY_DN, mod) + except Exception as e: + topology.master1.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology.master1.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.master1.modify_s(SUFFIX, mod) + time.sleep(1) + + # bind as bind_entry + topology.master1.log.info("M1: Bind as %s" % BIND_DN) + topology.master1.simple_bind_s(BIND_DN, BIND_PW) + + # modify the entry and checks the value + topology.master1.log.info("M1: Try to modify %s. 
It should succeeds" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')] + topology.master1.modify_s(ENTRY_DN, mod) + + topology.master1.log.info("M1: Bind as %s" % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + + topology.master1.log.info("M1: Check the update of %s" % ENTRY_DN) + ents = topology.master1.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + assert ents[0].postalCode == '1928' + + # Now check the update has been replicated on M2 + topology.master1.log.info("M2: Bind as %s" % DN_DM) + topology.master2.simple_bind_s(DN_DM, PASSWORD) + topology.master1.log.info("M2: Try to retrieve %s" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1928'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent.getValue('postalCode') == '1928' + + # Now update the entry on Master2 bound as BIND_DN (update may fail if 47653 is not fixed on M2) + topology.master1.log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN)) + topology.master2.simple_bind_s(BIND_DN, PASSWORD) + fail = False + try: + mod = [(ldap.MOD_REPLACE, 'postalCode', '1929')] + topology.master2.modify_s(ENTRY_DN, mod) + fail = False + except ldap.INSUFFICIENT_ACCESS: + topology.master1.log.info("M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2") + fail = True + except Exception as e: + topology.master1.log.info("M2: Exception (not expected): %s" % type(e).__name__) + assert 0 + + if not fail: + # Check the update has been replicaed on M1 + topology.master1.log.info("M1: Bind as %s" % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + topology.master1.log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN)) + loop = 0 + while loop <= 10: + try: + ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1929'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert ent.getValue('postalCode') == '1929' + + +def test_ticket47653_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + test_ticket47653_init(topo) + + test_ticket47653_add(topo) + test_ticket47653_modify(topo) + + test_ticket47653_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47653_test.py b/dirsrvtests/tests/tickets/ticket47653_test.py new file mode 100644 index 0000000..1901b84 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47653_test.py @@ -0,0 +1,381 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
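# Illustrative helper sketch (hypothetical, not in the patch): the "loop up to 10
# times, sleep, retry" blocks used above to wait for a replicated change could be
# factored out like this, using only calls already shown in these tests.
def wait_for_replicated_value(inst, dn, attr, expected, retries=10):
    for _ in range(retries):
        try:
            ent = inst.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", [attr])
            if ent.hasAttr(attr) and ent.getValue(attr) == expected:
                return True
        except ldap.NO_SUCH_OBJECT:
            pass
        time.sleep(1)
    return False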
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +OC_NAME = 'OCticket47653' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
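# For readability of the tests below, which assemble SELFDN ACIs piecewise: fully
# expanded, such an ACI reads as a single value like the one sketched here
# (illustrative only; assumes SUFFIX is dc=example,dc=com).
EXAMPLE_SELFDN_ADD_ACI = ('(target = "ldap:///cn=*,dc=example,dc=com")'
                          '(targetfilter ="(objectClass=OCticket47653)")'
                          '(version 3.0; acl "SelfDN add"; allow (add)'
                          ' userattr = "member#selfDN";)')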
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47653_init(topology): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) + topology.standalone.schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology.standalone.log.info("Add %s" % BIND_DN) + topology.standalone.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] + topology.standalone.modify_s(DN_CONFIG, mod) + + # get read of anonymous ACI for use 'read-search' aci in SEARCH test + ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" + mod = [(ldap.MOD_DELETE, 'aci', ACI_ANONYMOUS)] + topology.standalone.modify_s(SUFFIX, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47653_add(topology): + ''' + It checks that, bound as bind_entry, + - we can not ADD an entry without the proper SELFDN aci. + - with the proper ACI we can not ADD with 'member' attribute + - with the proper ACI and 'member' it succeeds to ADD + ''' + topology.standalone.log.info("\n\n######################### ADD ######################\n") + + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # Prepare the entry with multivalued members + entry_with_members = Entry(ENTRY_DN) + entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_members.setValues('sn', ENTRY_NAME) + entry_with_members.setValues('cn', ENTRY_NAME) + entry_with_members.setValues('postalAddress', 'here') + entry_with_members.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry_with_members.setValues('member', members) + + # Prepare the entry with one member + entry_with_member = Entry(ENTRY_DN) + entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_member.setValues('sn', ENTRY_NAME) + entry_with_member.setValues('cn', ENTRY_NAME) + entry_with_member.setValues('postalAddress', 'here') + entry_with_member.setValues('postalCode', '1234') + member = [] + member.append(BIND_DN) + entry_with_member.setValues('member', member) + + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) + + topology.standalone.add_s(entry_with_member) + except Exception as e: + topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % 
DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) + topology.standalone.add_s(Entry((ENTRY_DN, { + 'objectclass': ENTRY_OC.split(), + 'sn': ENTRY_NAME, + 'cn': ENTRY_NAME, + 'postalAddress': 'here', + 'postalCode': '1234'}))) + except Exception as e: + topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + # member should contain only one value + try: + topology.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN) + topology.standalone.add_s(entry_with_members) + except Exception as e: + topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + topology.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN) + topology.standalone.add_s(entry_with_member) + + +def test_ticket47653_search(topology): + ''' + It checks that, bound as bind_entry, + - we can not search an entry without the proper SELFDN aci. + - adding the ACI, we can search the entry + ''' + topology.standalone.log.info("\n\n######################### SEARCH ######################\n") + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned + topology.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN) + ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 0 + + # Ok Now add the proper ACI + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search with the proper aci + topology.standalone.log.info("Try to search %s should be successful" % ENTRY_DN) + ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + + +def test_ticket47653_modify(topology): + ''' + It checks that, bound as bind_entry, + - we can not modify an entry without the proper SELFDN aci. 
+ - adding the ACI, we can modify the entry + ''' + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + topology.standalone.log.info("\n\n######################### MODIFY ######################\n") + + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')] + topology.standalone.modify_s(ENTRY_DN, mod) + except Exception as e: + topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + + # Ok Now add the proper ACI + topology.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # modify the entry and checks the value + topology.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')] + topology.standalone.modify_s(ENTRY_DN, mod) + + ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + assert ents[0].postalCode == '1928' + + +def test_ticket47653_delete(topology): + ''' + It checks that, bound as bind_entry, + - we can not delete an entry without the proper SELFDN aci. 
+ - adding the ACI, we can delete the entry + ''' + topology.standalone.log.info("\n\n######################### DELETE ######################\n") + + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN) + topology.standalone.delete_s(ENTRY_DN) + except Exception as e: + topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology.standalone.log.info("Bind as %s" % BIND_DN) + topology.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search with the proper aci + topology.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN) + topology.standalone.delete_s(ENTRY_DN) + + +def test_ticket47653_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47653_init(topo) + + test_ticket47653_add(topo) + test_ticket47653_search(topo) + test_ticket47653_modify(topo) + test_ticket47653_delete(topo) + + test_ticket47653_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47664_test.py b/dirsrvtests/tests/tickets/ticket47664_test.py new file mode 100644 index 0000000..460828d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47664_test.py @@ -0,0 +1,225 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl +from ldap.controls.simple import GetEffectiveRightsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +MYSUFFIX = 'o=ticket47664.org' +MYSUFFIXBE = 'ticket47664' + +_MYLDIF = 'ticket47664.ldif' + +SEARCHFILTER = '(objectclass=*)' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47664_run(topology): + """ + Import 20 entries + Search with Simple Paged Results Control (pagesize = 4) + Get Effective Rights Control (attrs list = ['cn']) + If Get Effective Rights attribute (attributeLevelRights for 'cn') is returned 4 attrs / page AND + the page count == 20/4, then the fix is verified. + """ + log.info('Testing Ticket 47664 - paged results control is not working in some cases when we have a subsuffix') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47664.org ######################\n") + + topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) + topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) + + topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") + + # get tmp dir + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + if mytmp is None: + mytmp = "/tmp" + + MYLDIF = '%s%s' % (mytmp, _MYLDIF) + os.system('ls %s' % MYLDIF) + os.system('rm -f %s' % MYLDIF) + if hasattr(topology.standalone, 'prefix'): + prefix = topology.standalone.prefix + else: + prefix = None + dbgen_prog = prefix + '/bin/dbgen.pl' + topology.standalone.log.info('dbgen_prog: %s' % dbgen_prog) + os.system('%s -s %s -o %s -n 14' % (dbgen_prog, MYSUFFIX, MYLDIF)) + cmdline = 'egrep dn: %s | wc -l' % MYLDIF + p = os.popen(cmdline, "r") + dnnumstr = p.readline() + dnnum = int(dnnumstr) + topology.standalone.log.info("We have %d entries.\n", dnnum) + + topology.standalone.log.info("\n\n######################### Import Test data ######################\n") + + args = {TASK_WAIT: True} + importTask = Tasks(topology.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) + + topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology.standalone.log.info("Returned %d entries.\n", len(entries)) + + #print entries + + assert dnnum == len(entries) + + topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) + + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL ######################\n") + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + ger_req_ctrl = GetEffectiveRightsControl(True, "dn: " + DN_DM) + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl, ger_req_ctrl]) + attrlevelrightscnt = 0 + pageddncnt = 0 + pages = 0 + while True: + pages += 1 + + topology.standalone.log.info("Getting page %d" % pages) + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + topology.standalone.log.info("%d results" % len(rdata)) + pageddncnt += len(rdata) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + topology.standalone.log.info("attributeLevelRights: %s" % attrs['attributeLevelRights'][0]) + if attrs['attributeLevelRights'][0] != "": + attrlevelrightscnt += 1 + + pctrls = [ + c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType + ] + if not pctrls: + topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') + break + + if pctrls[0].cookie: + spr_req_ctrl.cookie = pctrls[0].cookie + topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl, ger_req_ctrl]) + else: + topology.standalone.log.info("No cookie") + break + + topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) + + assert dnnum == len(entries) + assert dnnum == attrlevelrightscnt + assert pages == (dnnum / page_size) + topology.standalone.log.info("ticket47664 was successfully verified.") + + +def test_ticket47664_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47664_run(topo) + + test_ticket47664_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47669_test.py b/dirsrvtests/tests/tickets/ticket47669_test.py new file mode 100644 index 0000000..2ef1f3e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47669_test.py @@ -0,0 +1,265 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
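# Illustrative sketch of the paged-search pattern used in test_ticket47664_run() above:
# a minimal generic loop with python-ldap's SimplePagedResultsControl (hypothetical
# helper, no Get Effective Rights control; assumes the imports already in that file).
def paged_search(conn, base, filterstr, attrs, page_size=4):
    req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
    known = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
    results = []
    while True:
        msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr, attrs,
                                serverctrls=[req_ctrl])
        rtype, rdata, rmsgid, rctrls = conn.result3(msgid, resp_ctrl_classes=known)
        results.extend(rdata)
        cookies = [c.cookie for c in rctrls
                   if c.controlType == SimplePagedResultsControl.controlType]
        if not cookies or not cookies[0]:
            return results
        req_ctrl.cookie = cookies[0]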
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl +from ldap.controls.simple import GetEffectiveRightsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +CHANGELOG = 'cn=changelog5,cn=config' +RETROCHANGELOG = 'cn=Retro Changelog Plugin,cn=plugins,cn=config' + +MAXAGE = 'nsslapd-changelogmaxage' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' +COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval' + +FILTER = '(cn=*)' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47669_init(topology): + """ + Add cn=changelog5,cn=config + Enable cn=Retro Changelog Plugin,cn=plugins,cn=config + """ + log.info('Testing Ticket 47669 - Test duration syntax in the changelogs') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + try: + changelogdir = "%s/changelog" % topology.standalone.dbdir + topology.standalone.add_s(Entry((CHANGELOG, + {'objectclass': 'top extensibleObject'.split(), + 'nsslapd-changelogdir': changelogdir}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + CHANGELOG + ': error ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable ' + RETROCHANGELOG + ': error ' + e.message['desc']) + assert False + + # restart the server + topology.standalone.restart(timeout=10) + + +def add_and_check(topology, plugin, attr, val, isvalid): + """ + Helper function to add/replace attr: val and check the added value + """ + if isvalid: + log.info('Test %s: %s -- valid' % (attr, val)) + try: + topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc']) + assert False + else: + log.info('Test %s: %s -- invalid' % (attr, val)) + if plugin == CHANGELOG: + try: + topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)]) + except ldap.LDAPError as e: + log.error('Expectedly failed to add 
' + attr + ': ' + val + + ' to ' + plugin + ': error ' + e.message['desc']) + else: + try: + topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc']) + + try: + entries = topology.standalone.search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr]) + if isvalid: + if not entries[0].hasValue(attr, val): + log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) + assert False + else: + if plugin == CHANGELOG: + if entries[0].hasValue(attr, val): + log.fatal('%s has unexpected (%s: %s)' % (plugin, attr, val)) + assert False + else: + if not entries[0].hasValue(attr, val): + log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (plugin, e.message['desc'])) + assert False + + +def test_ticket47669_changelog_maxage(topology): + """ + Test nsslapd-changelogmaxage in cn=changelog5,cn=config + """ + log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topology, CHANGELOG, MAXAGE, '12345', True) + add_and_check(topology, CHANGELOG, MAXAGE, '10s', True) + add_and_check(topology, CHANGELOG, MAXAGE, '30M', True) + add_and_check(topology, CHANGELOG, MAXAGE, '12h', True) + add_and_check(topology, CHANGELOG, MAXAGE, '2D', True) + add_and_check(topology, CHANGELOG, MAXAGE, '4w', True) + add_and_check(topology, CHANGELOG, MAXAGE, '-123', False) + add_and_check(topology, CHANGELOG, MAXAGE, 'xyz', False) + + +def test_ticket47669_changelog_triminterval(topology): + """ + Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config + """ + log.info('2. Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12345', True) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '10s', True) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '30M', True) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12h', True) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '2D', True) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '4w', True) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, '-123', False) + add_and_check(topology, CHANGELOG, TRIMINTERVAL, 'xyz', False) + + +def test_ticket47669_changelog_compactdbinterval(topology): + """ + Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config + """ + log.info('3. 
Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12345', True) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '10s', True) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '30M', True) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12h', True) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '2D', True) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '4w', True) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '-123', False) + add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False) + + +def test_ticket47669_retrochangelog_maxage(topology): + """ + Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config + """ + log.info('4. Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topology, RETROCHANGELOG, MAXAGE, '12345', True) + add_and_check(topology, RETROCHANGELOG, MAXAGE, '10s', True) + add_and_check(topology, RETROCHANGELOG, MAXAGE, '30M', True) + add_and_check(topology, RETROCHANGELOG, MAXAGE, '12h', True) + add_and_check(topology, RETROCHANGELOG, MAXAGE, '2D', True) + add_and_check(topology, RETROCHANGELOG, MAXAGE, '4w', True) + add_and_check(topology, RETROCHANGELOG, MAXAGE, '-123', False) + add_and_check(topology, RETROCHANGELOG, MAXAGE, 'xyz', False) + + topology.standalone.log.info("ticket47669 was successfully verified.") + + +def test_ticket47669_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + """ + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + """ + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47669_init(topo) + test_ticket47669_changelog_maxage(topo) + test_ticket47669_changelog_triminterval(topo) + test_ticket47669_changelog_compactdbinterval(topo) + test_ticket47669_retrochangelog_maxage(topo) + test_ticket47669_final(topo) + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47676_test.py b/dirsrvtests/tests/tickets/ticket47676_test.py new file mode 100644 index 0000000..22c2994 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47676_test.py @@ -0,0 +1,406 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. 
We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +SCHEMA_DN = "cn=schema" +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47676' +OC_OID_EXT = 2 +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OC2_NAME = 'OC2ticket47676' +OC2_OID_EXT = 3 +MUST_2 = "(postalAddress $ postalCode)" +MAY_2 = "(member $ street)" + +REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" +REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +BASE_OID = "1.2.3.4.5.6.7.8.9.10" + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "%s.%d" % (BASE_OID, oid_ext) + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. + ''' + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: 
+ log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master1.clearTmpDir(__file__) + + # Here we have two instances master and consumer + # with replication working. + return TopologyMaster1Master2(master1, master2) + + +def test_ticket47676_init(topology): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY) + topology.master1.schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology.master1.log.info("Add %s" % BIND_DN) + topology.master1.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47676_skip_oc_at(topology): + ''' + This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated + on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2. 
+ If the schema has successfully been pushed, updating Master2 should succeed + ''' + topology.master1.log.info("\n\n######################### ADD ######################\n") + + # bind as 'cn=Directory manager' + topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + + # Prepare the entry with multivalued members + entry = Entry(ENTRY_DN) + entry.setValues('objectclass', 'top', 'person', 'OCticket47676') + entry.setValues('sn', ENTRY_NAME) + entry.setValues('cn', ENTRY_NAME) + entry.setValues('postalAddress', 'here') + entry.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry.setValues('member', members) + + topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN) + topology.master1.add_s(entry) + + # + # Now check the entry as been replicated + # + topology.master2.simple_bind_s(DN_DM, PASSWORD) + topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(2) + loop += 1 + assert loop <= 10 + + # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2) + topology.master1.log.info("Update %s on M2" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', 'test_add')] + topology.master2.modify_s(ENTRY_DN, mod) + + topology.master1.simple_bind_s(DN_DM, PASSWORD) + loop = 0 + while loop <= 10: + ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'): + break + time.sleep(1) + loop += 1 + + assert ent.getValue('description') == 'test_add' + + +def test_ticket47676_reject_action(topology): + + topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n") + + topology.master1.simple_bind_s(DN_DM, PASSWORD) + topology.master2.simple_bind_s(DN_DM, PASSWORD) + + # make master1 to refuse to push the schema if OC_NAME is present in consumer schema + mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL + topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) + + # Restart is required to take into account that policy + topology.master1.stop(timeout=10) + topology.master1.start(timeout=10) + + # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema + topology.master1.log.info("Add %s on M1" % OC2_NAME) + new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY) + topology.master1.schema.add_schema('objectClasses', new_oc) + + # Safety checking that the schema has been updated on M1 + topology.master1.log.info("Check %s is in M1" % OC2_NAME) + ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert found + + # Do an update of M1 so that M1 will try to push the schema + topology.master1.log.info("Update %s on M1" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')] + topology.master1.modify_s(ENTRY_DN, mod) + + # Check the replication occured and so also M1 attempted to push the schema + 
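# Illustrative aside: whether the schema was actually pushed can also be observed by
# comparing the schema CSN on both masters; nsSchemaCSN is the attribute 389 DS keeps
# on cn=schema (treat the exact name as an assumption).
csn_m1 = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE,
                                   "(objectclass=*)", ['nsSchemaCSN'])
csn_m2 = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE,
                                   "(objectclass=*)", ['nsSchemaCSN'])
log.debug('schema CSN M1=%s M2=%s' % (csn_m1.getValue('nsSchemaCSN'),
                                      csn_m2.getValue('nsSchemaCSN')))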
topology.master1.log.info("Check updated %s on M2" % ENTRY_DN) + loop = 0 + while loop <= 10: + ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + if ent.hasAttr('description') and ent.getValue('description') == 'test_reject': + # update was replicated + break + time.sleep(2) + loop += 1 + assert loop <= 10 + + # Check that the schema has not been pushed + topology.master1.log.info("Check %s is not in M2" % OC2_NAME) + ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert not found + + topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n") + + # make master1 to do no specific action on OC_NAME + mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL + topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) + + # Restart is required to take into account that policy + topology.master1.stop(timeout=10) + topology.master1.start(timeout=10) + + # Do an update of M1 so that M1 will try to push the schema + topology.master1.log.info("Update %s on M1" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')] + topology.master1.modify_s(ENTRY_DN, mod) + + # Check the replication occured and so also M1 attempted to push the schema + topology.master1.log.info("Check updated %s on M2" % ENTRY_DN) + loop = 0 + while loop <= 10: + ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject': + # update was replicated + break + time.sleep(2) + loop += 1 + assert loop <= 10 + + # Check that the schema has been pushed + topology.master1.log.info("Check %s is in M2" % OC2_NAME) + ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert found + + +def test_ticket47676_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n") + test_ticket47676_init(topo) + + test_ticket47676_skip_oc_at(topo) + test_ticket47676_reject_action(topo) + + test_ticket47676_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47714_test.py b/dirsrvtests/tests/tickets/ticket47714_test.py new file mode 100644 index 0000000..268ddef --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47714_test.py @@ -0,0 +1,263 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import shutil +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX +INACTIVITY_LIMIT = '9' +SEARCHFILTER = '(objectclass=*)' + +TEST_USER = 'ticket47714user' +TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX) +TEST_USER_PW = '%s' % TEST_USER + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + + +def test_ticket47714_init(topology): + """ + 1. Add account policy entry to the DB + 2. Add a test user to the DB + """ + _header(topology, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) + topology.standalone.add_s(Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), + 'accountInactivityLimit': INACTIVITY_LIMIT}))) + + log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) + topology.standalone.add_s(Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': TEST_USER, + 'sn': TEST_USER, + 'givenname': TEST_USER, + 'userPassword': TEST_USER_PW, + 'acctPolicySubentry': ACCT_POLICY_DN}))) + + +def test_ticket47714_run_0(topology): + """ + Check this change has no impact on the existing functionality. + 1. Set account policy config without the new attr alwaysRecordLoginAttr + 2. Bind as a test user + 3. Bind as the test user again and check the lastLoginTime is updated + 4.
Wait longer than the accountInactivityLimit time and bind as the test user, + which should fail with CONSTRAINT_VIOLATION. + """ + _header(topology, 'Account Policy - No new attr alwaysRecordLoginAttr in config') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Modify Account Policy config entry + topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), + (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'), + (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'), + (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')]) + + # Enable the plugins + topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + + topology.standalone.restart(timeout=120) + + log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) + try: + topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION ' + e.message['desc']) + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + + lastLoginTime0 = entry[0].lastLoginTime + + time.sleep(2) + + log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) + try: + topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION ' + e.message['desc']) + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + + lastLoginTime1 = entry[0].lastLoginTime + + log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) + assert lastLoginTime0 < lastLoginTime1 + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER) + log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN) + log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit) + log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN) + + time.sleep(10) + + log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN) + try: + topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.info('CONSTRAINT VIOLATION ' + e.message['desc']) + log.info("%s was successfully inactivated." % TEST_USER_DN) + pass + + +def test_ticket47714_run_1(topology): + """ + Verify a new config attr alwaysRecordLoginAttr + 1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime + Note: bogus attr is set to stateattrname. + altstateattrname type value is used for checking whether the account is idle or not. + 2. Bind as a test user + 3.
Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated + """ + _header(topology, 'Account Policy - With new attr alwaysRecordLoginAttr in config') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)]) + + # Modify Account Policy config entry + topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), + (ldap.MOD_REPLACE, 'stateattrname', 'bogus'), + (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'), + (ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'), + (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')]) + + # Enable the plugins + topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + + topology.standalone.restart(timeout=120) + + log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) + try: + topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION ' + e.message['desc']) + + time.sleep(1) + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + lastLoginTime0 = entry[0].lastLoginTime + + log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) + try: + topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION ' + e.message['desc']) + + time.sleep(1) + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + lastLoginTime1 = entry[0].lastLoginTime + + log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) + assert lastLoginTime0 < lastLoginTime1 + + topology.standalone.log.info("ticket47714 was successfully verified.") + + +def test_ticket47714_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47714_init(topo) + + test_ticket47714_run_0(topo) + + test_ticket47714_run_1(topo) + + test_ticket47714_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47721_test.py b/dirsrvtests/tests/tickets/ticket47721_test.py new file mode 100644 index 0000000..7841423 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47721_test.py @@ -0,0 +1,468 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389._constants import REPLICAROLE_MASTER + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +SCHEMA_DN = "cn=schema" +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47721' +OC_OID_EXT = 2 +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OC2_NAME = 'OC2ticket47721' +OC2_OID_EXT = 3 +MUST_2 = "(postalAddress $ postalCode)" +MAY_2 = "(member $ street)" + +REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" +REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +BASE_OID = "1.2.3.4.5.6.7.8.9.10" + +SLEEP_INTERVAL = 60 + +def _add_custom_at_definition(name='ATticket47721'): + new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (name, name) + return new_at + + +def _chg_std_at_defintion(): + new_at = "( 2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'Netscape Directory Server' )" + return new_at + + +def _add_custom_oc_defintion(name='OCticket47721'): + new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (name, name) + return new_oc + + +def _chg_std_oc_defintion(): + new_oc = "( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY ( accessTo $ ou ) X-ORIGIN 'nss_ldap/pam_ldap' )" + return new_oc + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. 
+ ''' + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master1.clearTmpDir(__file__) + + # + # Here we have two instances master and consumer + # with replication working. 
Either coming from a backup recovery + # or from a fresh (re)init + # Time to return the topology + return TopologyMaster1Master2(master1, master2) + + +def test_ticket47721_init(topology): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + # entry used to bind with + topology.master1.log.info("Add %s" % BIND_DN) + topology.master1.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable repl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL logging + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47721_0(topology): + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + +def test_ticket47721_1(topology): + log.info('Running test 1...') + #topology.master1.log.info("Attach debugger\n\n") + #time.sleep(30) + + new = _add_custom_at_definition() + topology.master1.log.info("Add (M2) %s " % new) + topology.master2.schema.add_schema('attributetypes', new) + + new = _chg_std_at_defintion() + topology.master1.log.info("Chg (M2) %s " % new) + topology.master2.schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion() + topology.master1.log.info("Add (M2) %s " % new) + topology.master2.schema.add_schema('objectClasses', new) + + new = _chg_std_oc_defintion() + topology.master1.log.info("Chg (M2) %s " % new) + topology.master2.schema.add_schema('objectClasses', new) + + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 1')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology.master2.modify_s(dn, mod) + + loop = 0 + while loop <= 10: + try: + ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 1'): + break + except ldap.NO_SUCH_OBJECT: + loop += 1 + time.sleep(1) + assert loop <= 10 + + time.sleep(2) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + + +def test_ticket47721_2(topology): + log.info('Running test 2...') + + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 2')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology.master1.modify_s(dn, mod) + + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 2'): + break + except ldap.NO_SUCH_OBJECT: + loop += 1 + time.sleep(1) + assert loop <= 10 + + time.sleep(2) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + if schema_csn_master1 != schema_csn_master2: + # We need to 
give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_master1, schema_csn_master2)) + time.sleep(SLEEP_INTERVAL) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None + assert schema_csn_master1 == schema_csn_master2 + + +def test_ticket47721_3(topology): + ''' + Check that the supplier can update its schema from consumer schema + Update M2 schema, then trigger a replication M1->M2 + ''' + log.info('Running test 3...') + + # stop RA M2->M1, so that M1 can only learn being a supplier + ents = topology.master2.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master2.agreement.pause(ents[0].dn) + + new = _add_custom_at_definition('ATtest3') + topology.master1.log.info("Update schema (M2) %s " % new) + topology.master2.schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion('OCtest3') + topology.master1.log.info("Update schema (M2) %s " % new) + topology.master2.schema.add_schema('objectClasses', new) + + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 3')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology.master1.modify_s(dn, mod) + + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 3'): + break + except ldap.NO_SUCH_OBJECT: + loop += 1 + time.sleep(1) + assert loop <= 10 + + time.sleep(2) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + if schema_csn_master1 == schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_master1, schema_csn_master2)) + time.sleep(SLEEP_INTERVAL) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None + # schema csn on M2 is larger that on M1. M1 only took the new definitions + assert schema_csn_master1 != schema_csn_master2 + + +def test_ticket47721_4(topology): + ''' + Here M2->M1 agreement is disabled. + with test_ticket47721_3, M1 schema and M2 should be identical BUT + the nsschemacsn is M2>M1. But as the RA M2->M1 is disabled, M1 keeps its schemacsn. + Update schema on M2 (nsschemaCSN update), update M2. 
Check they have the same schemacsn + ''' + log.info('Running test 4...') + + new = _add_custom_at_definition('ATtest4') + topology.master1.log.info("Update schema (M1) %s " % new) + topology.master1.schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion('OCtest4') + topology.master1.log.info("Update schema (M1) %s " % new) + topology.master1.schema.add_schema('objectClasses', new) + + topology.master1.log.info("trigger replication M1->M2: to update the schema") + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 4')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology.master1.modify_s(dn, mod) + + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 4'): + break + except ldap.NO_SUCH_OBJECT: + loop += 1 + time.sleep(1) + assert loop <= 10 + + topology.master1.log.info("trigger replication M1->M2: to push the schema") + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 5')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology.master1.modify_s(dn, mod) + + loop = 0 + while loop <= 10: + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 5'): + break + except ldap.NO_SUCH_OBJECT: + loop += 1 + time.sleep(1) + assert loop <= 10 + + time.sleep(2) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are incorrectly in sync, wait a little...') + time.sleep(SLEEP_INTERVAL) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None + assert schema_csn_master1 == schema_csn_master2 + + +def test_ticket47721_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + topo.master1.log.info("\n\n######################### Ticket 47721 ######################\n") + test_ticket47721_init(topo) + + test_ticket47721_0(topo) + test_ticket47721_1(topo) + test_ticket47721_2(topo) + test_ticket47721_3(topo) + test_ticket47721_4(topo) + + test_ticket47721_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47781_test.py b/dirsrvtests/tests/tickets/ticket47781_test.py new file mode 100644 index 0000000..c52612e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47781_test.py @@ -0,0 +1,188 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47781(topology): + """ + Testing for a deadlock after doing an online import of an LDIF with + replication data. The replication agreement should be invalid. + """ + + log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data') + + # + # Setup Replication + # + log.info('Setting up replication...') + topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, + replicaId=REPLICAID_MASTER_1) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + # The agreement should point to a server that does NOT exist (invalid port) + repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, + host=topology.standalone.host, + port=5555, + properties=properties) + + # + # add two entries + # + log.info('Adding two entries...') + try: + topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + except ldap.LDAPError as e: + log.error('Failed to add entry 1: ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry2'}))) + except ldap.LDAPError as e: + log.error('Failed to add entry 2: ' + e.message['desc']) + assert False + + # + # export the replication ldif + # + log.info('Exporting replication ldif...') + args = {EXPORT_REPL_INFO: True} + exportTask = Tasks(topology.standalone) + try: + exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) + except ValueError: + assert False + + # + # Restart the server + # + log.info('Restarting server...') + topology.standalone.stop(timeout=5) + topology.standalone.start(timeout=5) + + # + # Import the ldif + # + log.info('Import 
replication LDIF file...') + importTask = Tasks(topology.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) + os.remove("/tmp/export.ldif") + except ValueError: + os.remove("/tmp/export.ldif") + assert False + + # + # Search for tombstones - we should not hang/timeout + # + log.info('Search for tombstone entries (should find one and not hang)...') + topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5) + topology.standalone.set_option(ldap.OPT_TIMEOUT, 5) + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone') + if not entries: + log.fatal('Search failed to find any entries.') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + +def test_ticket47781_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47781(topo) + test_ticket47781_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47787_test.py b/dirsrvtests/tests/tickets/ticket47787_test.py new file mode 100644 index 0000000..305ec75 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47787_test.py @@ -0,0 +1,561 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on April 14, 2014 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +import re +from lib389 import DirSrv, Entry, tools, NoSuchEntryError +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389._constants import REPLICAROLE_MASTER + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part.
We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +# set this flag to False so that it will assert on failure _status_entry_both_server +DEBUG_FLAG = False + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX + +STAGING_CN = "staged user" +PRODUCTION_CN = "accounts" +EXCEPT_CN = "excepts" + +STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) +PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) +PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) + +STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) +PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) +BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) +BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) + +BIND_CN = "bind_entry" +BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) +BIND_PW = "password" + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. + ''' + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master1.clearTmpDir(__file__) + + # Here we have two instances master and consumer + # with replication working. + return TopologyMaster1Master2(master1, master2) + + +def _bind_manager(server): + server.log.info("Bind as %s " % DN_DM) + server.simple_bind_s(DN_DM, PASSWORD) + + +def _bind_normal(server): + server.log.info("Bind as %s " % BIND_DN) + server.simple_bind_s(BIND_DN, BIND_PW) + + +def _header(topology, label): + topology.master1.log.info("\n\n###############################################") + topology.master1.log.info("#######") + topology.master1.log.info("####### %s" % label) + topology.master1.log.info("#######") + topology.master1.log.info("###############################################") + + +def _status_entry_both_server(topology, name=None, desc=None, debug=True): + if not name: + return + topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n") + attr = 'description' + found = False + attempt = 0 + while not found and attempt < 10: + ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name) + if attr in ent_m1.getAttrs(): + found = True + else: + time.sleep(1) + attempt = attempt + 1 + assert ent_m1 + + topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n") + ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name) + assert ent_m2 + + topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc) + topology.master1.log.info("M1 only\n") + for attr in ent_m1.getAttrs(): + + if not debug: + assert attr in ent_m2.getAttrs() + + if not attr in ent_m2.getAttrs(): + topology.master1.log.info(" %s" % attr) + for val in ent_m1.getValues(attr): + topology.master1.log.info(" %s" % val) + + topology.master1.log.info("M2 only\n") + for attr in ent_m2.getAttrs(): + + if not debug: + assert attr in ent_m1.getAttrs() + + if not attr in ent_m1.getAttrs(): + topology.master1.log.info(" %s" % attr) + for val in ent_m2.getValues(attr): + topology.master1.log.info(" %s" % val) + + topology.master1.log.info("M1 differs M2\n") + + if not debug: + assert ent_m1.dn == ent_m2.dn + + if ent_m1.dn != ent_m2.dn: + topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn)) + + for attr1 in ent_m1.getAttrs(): + if attr1 in ent_m2.getAttrs(): + for val1 in ent_m1.getValues(attr1): + found = False + for val2 in ent_m2.getValues(attr1): + if val1 == val2: + found = True + break + + if not debug: + assert found + + if not found: + topology.master1.log.info(" M1[%s] = %s" % (attr1, val1)) + + for attr2 in ent_m2.getAttrs(): + if attr2 in ent_m1.getAttrs(): + for val2 in ent_m2.getValues(attr2): + found = False + for val1 in ent_m1.getValues(attr2): + if val2 == val1: + found = True + break + + if not debug: + assert found + + if not found: + topology.master1.log.info(" M2[%s] = %s" % (attr2, val2)) + + +def _pause_RAs(topology): + topology.master1.log.info("\n\n######################### Pause RA M1<->M2 
######################\n") + ents = topology.master1.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master1.agreement.pause(ents[0].dn) + + ents = topology.master2.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master2.agreement.pause(ents[0].dn) + + +def _resume_RAs(topology): + topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n") + ents = topology.master1.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master1.agreement.resume(ents[0].dn) + + ents = topology.master2.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master2.agreement.resume(ents[0].dn) + + +def _find_tombstone(instance, base, attr, value): + # + # we cannot use a filter like (&(objectclass=nsTombstone)(sn=name)) because + # tombstones are not indexed on 'sn', so 'sn=name' will return NULL + # and even if tombstones are indexed for objectclass, the '&' will set + # the candidate list to NULL + # + filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE + ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt) + #found = False + for ent in ents: + if ent.hasAttr(attr): + for val in ent.getValues(attr): + if val == value: + instance.log.debug("tombstone found: %r" % ent) + return ent + return None + + +def _delete_entry(instance, entry_dn, name): + instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name) + + # delete the entry + instance.delete_s(entry_dn) + assert _find_tombstone(instance, SUFFIX, 'sn', name) is not None + + +def _mod_entry(instance, entry_dn, attr, value): + instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn) + mod = [(ldap.MOD_REPLACE, attr, value)] + instance.modify_s(entry_dn, mod) + + +def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert instance is not None + assert entry_dn is not None + + if not new_rdn: + pattern = 'cn=(.*),(.*)' + rdnre = re.compile(pattern) + match = rdnre.match(entry_dn) + old_value = match.group(1) + new_rdn_val = "%s_modrdn" % old_value + new_rdn = "cn=%s" % new_rdn_val + + instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn) + if new_superior: + instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + instance.rename_s(entry_dn, new_rdn, delold=del_old) + + +def _check_entry_exists(instance, entry_dn): + loop = 0 + ent = None + while loop <= 10: + try: + ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + +def _check_mod_received(instance, base, filt, attr, value): + instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid) + loop = 0 + while loop <= 10: + ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt) + if ent.hasAttr(attr) and ent.getValue(attr) == value: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + + +def _check_replication(topology, entry_dn): + # prepare the filter to retrieve the entry + filt = entry_dn.split(',')[0] + + topology.master1.log.info("\n######################### Check replication M1->M2 ######################\n") + loop = 0 + while loop <= 10: + attr = 'description' + value = 'test_value_%d' % loop + mod = [(ldap.MOD_REPLACE, attr, value)] + topology.master1.modify_s(entry_dn, mod) + _check_mod_received(topology.master2, SUFFIX, filt,
attr, value) + loop += 1 + + topology.master1.log.info("\n######################### Check replication M2->M1 ######################\n") + loop = 0 + while loop <= 10: + attr = 'description' + value = 'test_value_%d' % loop + mod = [(ldap.MOD_REPLACE, attr, value)] + topology.master2.modify_s(entry_dn, mod) + _check_mod_received(topology.master1, SUFFIX, filt, attr, value) + loop += 1 + + +def test_ticket47787_init(topology): + """ + Creates + - a staging DIT + - a production DIT + - add accounts in staging DIT + + """ + + topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n") + + # entry used to bind with + topology.master1.log.info("Add %s" % BIND_DN) + topology.master1.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_CN, + 'cn': BIND_CN, + 'userpassword': BIND_PW}))) + + # DIT for staging + topology.master1.log.info("Add %s" % STAGING_DN) + topology.master1.add_s(Entry((STAGING_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': STAGING_CN, + 'description': "staging DIT"}))) + + # DIT for production + topology.master1.log.info("Add %s" % PRODUCTION_DN) + topology.master1.add_s(Entry((PRODUCTION_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': PRODUCTION_CN, + 'description': "production DIT"}))) + + # enable replication error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')] + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + # add dummy entries in the staging DIT + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47787_2(topology): + ''' + Disable replication so that updates are not replicated + Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). + update a test entry on M2 + Reenable the RA. + checks that entry was deleted on M2 (with the modified RDN) + checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn) + ''' + + _header(topology, "test_ticket47787_2") + _bind_manager(topology.master1) + _bind_manager(topology.master2) + + # entry to test that replication is still working + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1) + test_rdn = "cn=%s" % (name) + testentry_dn = "%s,%s" % (test_rdn, STAGING_DN) + + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2) + test2_rdn = "cn=%s" % (name) + testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN) + + # value of updates to test the replication both ways + attr = 'description' + value = 'test_ticket47787_2' + + # entry for the modrdn + name = "%s%d" % (NEW_ACCOUNT, 1) + rdn = "cn=%s" % (name) + entry_dn = "%s,%s" % (rdn, STAGING_DN) + + # created on M1, wait until the entry exists on M2 + _check_entry_exists(topology.master2, entry_dn) + _check_entry_exists(topology.master2, testentry_dn) + + _pause_RAs(topology) + + # Delete 'entry_dn' on M1. + # the dummy update is there only to have a first CSN before the DEL, + # else the DEL will be in the min_csn RUV and make diagnosis a bit more complex + _mod_entry(topology.master1, testentry2_dn, attr, 'dummy') + _delete_entry(topology.master1, entry_dn, name) + _mod_entry(topology.master1, testentry2_dn, attr, value) + + time.sleep(1) # important to have MOD.csn != DEL.csn + + # MOD 'entry_dn' on M2.
+ # dummy update is only have a first CSN before the MOD entry_dn + # else the DEL will be in min_csn RUV and make diagnostic a bit more complex + _mod_entry(topology.master2, testentry_dn, attr, 'dummy') + _mod_entry(topology.master2, entry_dn, attr, value) + _mod_entry(topology.master2, testentry_dn, attr, value) + + _resume_RAs(topology) + + topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n") + loop = 0 + while loop <= 10: + ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name) + if ent: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent + + # the following checks are not necessary + # as this bug is only for failing replicated MOD (entry_dn) on M1 + #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value) + #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value) + # + #_check_replication(topology, testentry_dn) + + _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG) + + topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n") + loop = 0 + while loop <= 10: + ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name) + if ent: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent + assert ent.hasAttr(attr) + assert ent.getValue(attr) == value + + +def test_ticket47787_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n") + test_ticket47787_init(topo) + + test_ticket47787_2(topo) + + test_ticket47787_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47808_test.py b/dirsrvtests/tests/tickets/ticket47808_test.py new file mode 100644 index 0000000..4254c8c --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47808_test.py @@ -0,0 +1,166 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config' +ENTRY_NAME = 'test_entry' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=True) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47808_run(topology): + """ + It enables the attribute uniqueness plugin with sn as a unique attribute + Add an entry 1 with sn = ENTRY_NAME + Add an entry 2 with sn = ENTRY_NAME + If the second add does not crash the server and the following search finds no such entry, + the bug is fixed. + """ + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n") + + # enable attribute uniqueness plugin + mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)] + topology.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod) + + topology.standalone.log.info("\n\n######################### ADD USER 1 ######################\n") + + # Prepare entry 1 + entry_name = '%s 1' % (ENTRY_NAME) + entry_dn_1 = 'cn=%s, %s' % (entry_name, SUFFIX) + entry_1 = Entry(entry_dn_1) + entry_1.setValues('objectclass', 'top', 'person') + entry_1.setValues('sn', ENTRY_NAME) + entry_1.setValues('cn', entry_name) + topology.standalone.log.info("Try to add %s: %r" % (entry_1, entry_1)) + topology.standalone.add_s(entry_1) + + topology.standalone.log.info("\n\n######################### Restart Server ######################\n") + topology.standalone.stop(timeout=10) + topology.standalone.start(timeout=10) + + topology.standalone.log.info("\n\n######################### ADD USER 2 ######################\n") + + # Prepare entry 2 with the same sn, which used to crash the server + entry_name = '%s 2' % (ENTRY_NAME) + entry_dn_2 = 'cn=%s, %s' % (entry_name, SUFFIX) + entry_2 = Entry(entry_dn_2) + entry_2.setValues('objectclass', 'top', 'person') + entry_2.setValues('sn', ENTRY_NAME) + entry_2.setValues('cn', entry_name) + topology.standalone.log.info("Try to add %s: %r" % (entry_2, entry_2)) + try: + topology.standalone.add_s(entry_2) + except: + topology.standalone.log.warn("Adding %s failed" % entry_dn_2) + pass + + topology.standalone.log.info("\n\n######################### IS SERVER UP?
######################\n") + ents = topology.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)') + assert len(ents) == 1 + topology.standalone.log.info("Yes, it's up.") + + topology.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n") + topology.standalone.log.info("Try to search %s" % entry_dn_2) + try: + ents = topology.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)') + except ldap.NO_SUCH_OBJECT: + topology.standalone.log.info("Found none") + + topology.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n") + + topology.standalone.log.info("Try to delete %s " % entry_dn_1) + topology.standalone.delete_s(entry_dn_1) + + +def test_ticket47808_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47808_run(topo) + + test_ticket47808_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47815_test.py b/dirsrvtests/tests/tickets/ticket47815_test.py new file mode 100644 index 0000000..675e97b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47815_test.py @@ -0,0 +1,179 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47815(topology): + """ + Test betxn plugins reject an invalid option, and make sure that the rejected entry + is not in the entry cache. 
+ + Enable memberOf, automember, and retrocl plugins + Add the automember config entry + Add the automember group + Add a user that will be rejected by a betxn plugin - result error 53 + Attempt the same add again, and it should result in another error 53 (not error 68) + """ + result = 0 + result2 = 0 + + log.info('Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache') + + # Enabled the plugins + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # configure automember config entry + log.info('Adding automember config') + try: + topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { + 'objectclass': 'top autoMemberDefinition'.split(), + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'cn=user', + 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com', + 'autoMemberGroupingAttr': 'member:dn', + 'cn': 'group cfg'}))) + except: + log.error('Failed to add automember config') + exit(1) + + topology.standalone.stop(timeout=120) + time.sleep(1) + topology.standalone.start(timeout=120) + time.sleep(3) + + # need to reopen a connection toward the instance + topology.standalone.open() + + # add automember group + log.info('Adding automember group') + try: + topology.standalone.add_s(Entry(('cn=group,dc=example,dc=com', { + 'objectclass': 'top groupOfNames'.split(), + 'cn': 'group'}))) + except: + log.error('Failed to add automember group') + exit(1) + + # add user that should result in an error 53 + log.info('Adding invalid entry') + + try: + topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('Adding invalid entry failed as expected') + result = 53 + except ldap.LDAPError as e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result == 0: + log.error('Add operation unexpectedly succeeded') + assert False + + # Attempt to add user again, should result in error 53 again + try: + topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('2nd add of invalid entry failed as expected') + result2 = 53 + except ldap.LDAPError as e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result2 == 0: + log.error('2nd Add operation unexpectedly succeeded') + assert False + + +def test_ticket47815_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47815(topo) + test_ticket47815_final(topo) + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47819_test.py b/dirsrvtests/tests/tickets/ticket47819_test.py new file mode 100644 index 0000000..435b36c --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47819_test.py @@ -0,0 +1,296 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47819(topology): + """ + Testing precise tombstone purging: + [1] Make sure "nsTombstoneCSN" is added to new tombstones + [2] Make sure an import of a replication ldif adds "nsTombstoneCSN" + to old tombstones + [4] Test fixup task + [3] Make sure tombstone purging works + """ + + log.info('Testing Ticket 47819 - Test precise tombstone purging') + + # + # Setup Replication + # + log.info('Setting up replication...') + topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, + replicaId=REPLICAID_MASTER_1) + + # + # Part 1 create a tombstone entry and make sure nsTombstoneCSN is added + # + log.info('Part 1: Add and then delete an entry to create a tombstone...') + + try: + topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + except ldap.LDAPError as e: + log.error('Failed to add entry: ' + e.message['desc']) + assert False + + try: + topology.standalone.delete_s('cn=entry1,dc=example,dc=com') + except ldap.LDAPError as e: + log.error('Failed to delete entry: ' + e.message['desc']) + assert False + + log.info('Search for tombstone entries...') + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') + if not entries: + log.fatal('Search failed to the new tombstone(nsTombstoneCSN is probably missing).') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + log.info('Part 1 - passed') + + # + # Part 2 - import ldif with tombstones missing 'nsTombstoneCSN' + # + # First, export the replication ldif, edit the file(remove nstombstonecsn), + # and reimport it. + # + log.info('Part 2: Exporting replication ldif...') + + # Get the the full path and name for our LDIF we will be exporting + ldif_file = topology.standalone.getDir(__file__, TMP_DIR) + "export.ldif" + + args = {EXPORT_REPL_INFO: True, + TASK_WAIT: True} + exportTask = Tasks(topology.standalone) + try: + exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + except ValueError: + assert False + + # open the ldif file, get the lines, then rewrite the file + ldif = open(ldif_file, "r") + lines = ldif.readlines() + ldif.close() + + ldif = open(ldif_file, "w") + for line in lines: + if not line.lower().startswith('nstombstonecsn'): + ldif.write(line) + ldif.close() + + # import the new ldif file + log.info('Import replication LDIF file...') + importTask = Tasks(topology.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + os.remove(ldif_file) + except ValueError: + os.remove(ldif_file) + assert False + + # Search for the tombstone again + log.info('Search for tombstone entries...') + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') + if not entries: + log.fatal('Search failed to fine the new tombstone(nsTombstoneCSN is probably missing).') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + log.info('Part 2 - passed') + + # + # Part 3 - test fixup task + # + log.info('Part 4: test the fixup task') + + # Run fixup task using the strip option. 
This removes nsTombstoneCSN + # so we can test if the fixup task works. + args = {TASK_WAIT: True, + TASK_TOMB_STRIP: True} + fixupTombTask = Tasks(topology.standalone) + try: + fixupTombTask.fixupTombstones(DEFAULT_BENAME, args) + except: + assert False + + # Search for tombstones with nsTombstoneCSN - better not find any + log.info('Search for tombstone entries...') + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') + if entries: + log.fatal('Search found tombstones with nsTombstoneCSN') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + # Now run the fixup task + args = {TASK_WAIT: True} + fixupTombTask = Tasks(topology.standalone) + try: + fixupTombTask.fixupTombstones(DEFAULT_BENAME, args) + except: + assert False + + # Search for tombstones with nsTombstoneCSN - better find some + log.info('Search for tombstone entries...') + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') + if not entries: + log.fatal('Search did not find any fixed-up tombstones') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + log.info('Part 3 - passed') + + # + # Part 4 - Test tombstone purging + # + log.info('Part 4: test tombstone purging...') + + args = {REPLICA_PRECISE_PURGING: 'on', + REPLICA_PURGE_DELAY: '5', + REPLICA_PURGE_INTERVAL: '5'} + try: + topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args) + except: + log.fatal('Failed to configure replica') + assert False + + # Wait for the interval to pass + log.info('Wait for tombstone purge interval to pass...') + time.sleep(6) + + # Add an entry to trigger replication + log.info('Perform an update to help trigger tombstone purging...') + try: + topology.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + except ldap.LDAPError as e: + log.error('Failed to add entry: ' + e.message['desc']) + assert False + + # Wait for the interval to pass again + log.info('Wait for tombstone purge interval to pass again...') + time.sleep(10) + + # search for tombstones, there should be none + log.info('Search for tombstone entries...') + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') + if entries: + log.fatal('Search unexpectedly found tombstones') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + log.info('Part 4 - passed') + + +def test_ticket47819_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47819(topo) + test_ticket47819_final(topo) + +if __name__ == '__main__': + run_isolated() \ No newline at end of file diff --git a/dirsrvtests/tests/tickets/ticket47823_test.py b/dirsrvtests/tests/tickets/ticket47823_test.py new file mode 100644 index 0000000..635827c --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47823_test.py @@ -0,0 +1,1021 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import re +import shutil +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + + +log = logging.getLogger(__name__) + +installation_prefix = None + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +ACTIVE_USER_1_CN = "test_1" +ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN) +ACTIVE_USER_2_CN = "test_2" +ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN) + +STAGE_USER_1_CN = ACTIVE_USER_1_CN +STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN) +STAGE_USER_2_CN = ACTIVE_USER_2_CN +STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN) + +ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', + 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees'] + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + + + standalone = DirSrv(verbose=False) + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + + +def _uniqueness_config_entry(topology, name=None): + if not name: + return None + + ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, + "(objectclass=nsSlapdPlugin)", + ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc', + 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type', + 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor', + 'nsslapd-pluginDescription']) + ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN) + return ent + + +def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False): + assert topology + assert attr_name + assert subtree_1 + + if type_config == 'old': + # enable the 'cn' uniqueness on Active + config = _uniqueness_config_entry(topology, attr_name) + config.setValue('nsslapd-pluginarg0', attr_name) + config.setValue('nsslapd-pluginarg1', subtree_1) + if subtree_2: + config.setValue('nsslapd-pluginarg2', subtree_2) + else: + # prepare the config entry + config = _uniqueness_config_entry(topology, attr_name) + config.setValue('uniqueness-attribute-name', attr_name) + config.setValue('uniqueness-subtrees', subtree_1) + if subtree_2: + config.setValue('uniqueness-subtrees', subtree_2) + if across_subtrees: + config.setValue('uniqueness-across-all-subtrees', 'on') + return config + + +def _active_container_invalid_cfg_add(topology): + ''' + Check uniqueness is not enforced with ADD (invalid config) + ''' + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + + topology.standalone.delete_s(ACTIVE_USER_1_DN) + topology.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_container_add(topology, type_config='old'): + ''' + Check uniqueness in a single container (Active) + Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) + + # remove the 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.restart(timeout=120) + + topology.standalone.log.info('Uniqueness not enforced: create the entries') + + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + + topology.standalone.delete_s(ACTIVE_USER_1_DN) + topology.standalone.delete_s(ACTIVE_USER_2_DN) + + + topology.standalone.log.info('Uniqueness enforced: checks 
second entry is rejected') + + # enable the 'cn' uniqueness on Active + topology.standalone.add_s(config) + topology.standalone.restart(timeout=120) + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + try: + topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology.standalone.delete_s(config.dn) + topology.standalone.delete_s(ACTIVE_USER_1_DN) + + +def _active_container_mod(topology, type_config='old'): + ''' + Check uniqueness in a single container (active) + Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value + + ''' + + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) + + # enable the 'cn' uniqueness on Active + topology.standalone.add_s(config) + topology.standalone.restart(timeout=120) + + topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected') + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': ACTIVE_USER_2_CN}))) + + try: + topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)]) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected') + try: + topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])]) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology.standalone.delete_s(config.dn) + topology.standalone.delete_s(ACTIVE_USER_1_DN) + topology.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_container_modrdn(topology, type_config='old'): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) + + # enable the 'cn' uniqueness on Active + topology.standalone.add_s(config) + topology.standalone.restart(timeout=120) + + topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected') + + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) + + topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': ACTIVE_USER_2_CN}))) + + try: + topology.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology.standalone.delete_s(config.dn) + topology.standalone.delete_s(ACTIVE_USER_1_DN) + topology.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_stage_containers_add(topology, type_config='old', across_subtrees=False): + ''' + Check uniqueness in several containers + Add an entry on a container with a given 'cn' + with 
across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container + with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container + + ''' + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) + + topology.standalone.add_s(config) + topology.standalone.restart(timeout=120) + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + try: + + # adding an entry on a separated contains with the same 'cn' + topology.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + # cleanup the stuff now + topology.standalone.delete_s(config.dn) + topology.standalone.delete_s(ACTIVE_USER_1_DN) + topology.standalone.delete_s(STAGE_USER_1_DN) + + +def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False): + ''' + Check uniqueness in a several containers + Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container + + ''' + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) + + topology.standalone.add_s(config) + topology.standalone.restart(timeout=120) + # adding an entry on active with a different 'cn' + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_2_CN}))) + + # adding an entry on a stage with a different 'cn' + topology.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_1_CN}))) + + try: + + # modify add same value + topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])]) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + topology.standalone.delete_s(STAGE_USER_1_DN) + topology.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_2_CN}))) + try: + # modify replace same value + topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])]) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + # cleanup the stuff now + topology.standalone.delete_s(config.dn) + topology.standalone.delete_s(ACTIVE_USER_1_DN) + topology.standalone.delete_s(STAGE_USER_1_DN) + + +def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container + + ''' + + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) + + # enable the 'cn' uniqueness on Active and Stage + topology.standalone.add_s(config) + topology.standalone.restart(timeout=120) + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) + + topology.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_1_CN}))) + + try: + + 
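+        # The staged entry is renamed below to 'cn=dummy', a value the active entry
+        # already carries; per the docstring this should only raise
+        # CONSTRAINT_VIOLATION when uniqueness is enforced across both subtrees
+        # (across_subtrees=True).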
topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0) + + # check stage entry has 'cn=dummy' + stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn']) + assert stage_ent.hasAttr('cn') + found = False + for value in stage_ent.getValues('cn'): + if value == 'dummy': + found = True + assert found + + # check active entry has 'cn=dummy' + active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn']) + assert active_ent.hasAttr('cn') + found = False + for value in stage_ent.getValues('cn'): + if value == 'dummy': + found = True + assert found + + topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN)) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + topology.standalone.delete_s(STAGE_USER_1_DN) + + # cleanup the stuff now + topology.standalone.delete_s(config.dn) + topology.standalone.delete_s(ACTIVE_USER_1_DN) + + +def _config_file(topology, action='save'): + dse_ldif = topology.standalone.confdir + '/dse.ldif' + sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823' + if action == 'save': + shutil.copy(dse_ldif, sav_file) + else: + shutil.copy(sav_file, dse_ldif) + + +def _pattern_errorlog(file, log_pattern): + try: + _pattern_errorlog.last_pos += 1 + except AttributeError: + _pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos) + file.seek(_pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + _pattern_errorlog.last_pos = file.tell() + return found + + +def test_ticket47823_init(topology): + """ + + """ + + # Enabled the plugins + topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + topology.standalone.restart(timeout=120) + + topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), + 'cn': STAGE_CN}))) + topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + topology.standalone.errorlog_file = open(topology.standalone.errlog, "r") + + topology.standalone.stop(timeout=120) + time.sleep(1) + topology.standalone.start(timeout=120) + time.sleep(3) + + +def test_ticket47823_one_container_add(topology): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_container_add(topology, type_config='old') + + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_container_add(topology, type_config='new') + + +def test_ticket47823_one_container_mod(topology): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value + + ''' + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD)") + + 
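+    # Both configuration styles are exercised: 'old' drives the plugin through
+    # nsslapd-pluginarg0/1/2, while 'new' uses uniqueness-attribute-name and
+    # uniqueness-subtrees (see _build_config above).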
_active_container_mod(topology, type_config='old') + + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD)") + + _active_container_mod(topology, type_config='new') + + +def test_ticket47823_one_container_modrdn(topology): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") + + _active_container_modrdn(topology, type_config='old') + + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") + + _active_container_modrdn(topology, type_config='new') + + +def test_ticket47823_multi_containers_add(topology): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_stage_containers_add(topology, type_config='old', across_subtrees=False) + + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_stage_containers_add(topology, type_config='new', across_subtrees=False) + + +def test_ticket47823_multi_containers_mod(topology): + ''' + Check uniqueness in a several containers + Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container + + ''' + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container") + + topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') + _active_stage_containers_mod(topology, type_config='old', across_subtrees=False) + + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container") + + topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') + _active_stage_containers_mod(topology, type_config='new', across_subtrees=False) + + +def test_ticket47823_multi_containers_modrdn(topology): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container + + ''' + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers") + + topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') + _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False) + + topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') + _active_stage_containers_modrdn(topology, type_config='old') + + +def test_ticket47823_across_multi_containers_add(topology): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers") + + _active_stage_containers_add(topology, type_config='old', across_subtrees=True) + + +def test_ticket47823_across_multi_containers_mod(topology): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', 
then check we can not modifiy an entry with the same 'cn' value + + ''' + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers") + + _active_stage_containers_mod(topology, type_config='old', across_subtrees=True) + + +def test_ticket47823_across_multi_containers_modrdn(topology): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers") + + _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=True) + + +def test_ticket47823_invalid_config_1(topology): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using old config: arg0 is missing + ''' + _header(topology, "Invalid config (old): arg0 is missing") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) + + del config.data['nsslapd-pluginarg0'] + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config fail: unable to parse old style") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_2(topology): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using old config: arg1 is missing + ''' + _header(topology, "Invalid config (old): arg1 is missing") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) + + del config.data['nsslapd-pluginarg1'] + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config info: No valid subtree is defined") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_3(topology): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using old config: arg0 is missing + ''' + _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) + + del config.data['nsslapd-pluginarg0'] + config.data['uniqueness-attribute-name'] = 'cn' + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config fail: unable to parse old style") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_4(topology): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using old config: arg1 is missing + ''' + _header(topology, "Invalid config (old): arg1 is missing but new config exist") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) + + del config.data['nsslapd-pluginarg1'] + config.data['uniqueness-subtrees'] = ACTIVE_DN + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config info: No valid subtree is defined") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_5(topology): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using new config: uniqueness-attribute-name is missing + ''' + _header(topology, "Invalid config (new): uniqueness-attribute-name is missing") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) + + del config.data['uniqueness-attribute-name'] + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config info: attribute name not defined") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_6(topology): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using new config: uniqueness-subtrees is missing + ''' + _header(topology, "Invalid config (new): uniqueness-subtrees is missing") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) + + del config.data['uniqueness-subtrees'] + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config info: objectclass for subtree entries is not defined") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_7(topology): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using new config: uniqueness-subtrees is missing + ''' + _header(topology, "Invalid config (new): uniqueness-subtrees are invalid") + + _config_file(topology, action='save') + + # create an invalid config without arg0 + config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False) + + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) + # replace 'cn' uniqueness entry + try: + topology.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology.standalone.add_s(config) + + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology.standalone.restart(timeout=5) + ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert not ent + except ldap.SERVER_DOWN: + pass + + # Check the expected error message + regex = re.compile("Config info: No valid subtree is defined") + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology, action='restore') + assert res + + # Check we can restart the server + _config_file(topology, action='restore') + topology.standalone.start(timeout=5) + try: + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + 
run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47823_init(topo) + + # run old/new config style that makes uniqueness checking on one subtree + test_ticket47823_one_container_add(topo) + test_ticket47823_one_container_mod(topo) + test_ticket47823_one_container_modrdn(topo) + + # run old config style that makes uniqueness checking on each defined subtrees + test_ticket47823_multi_containers_add(topo) + test_ticket47823_multi_containers_mod(topo) + test_ticket47823_multi_containers_modrdn(topo) + test_ticket47823_across_multi_containers_add(topo) + test_ticket47823_across_multi_containers_mod(topo) + test_ticket47823_across_multi_containers_modrdn(topo) + + test_ticket47823_invalid_config_1(topo) + test_ticket47823_invalid_config_2(topo) + test_ticket47823_invalid_config_3(topo) + test_ticket47823_invalid_config_4(topo) + test_ticket47823_invalid_config_5(topo) + test_ticket47823_invalid_config_6(topo) + test_ticket47823_invalid_config_7(topo) + + test_ticket47823_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47824_test.py b/dirsrvtests/tests/tickets/ticket47824_test.py new file mode 100644 index 0000000..ce1caa9 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47824_test.py @@ -0,0 +1,265 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +MYSUFFIX = 'o=ticket47824.org' +MYSUFFIXBE = 'ticket47824' +SUBSUFFIX0 = 'ou=OU0,o=ticket47824.org' +SUBSUFFIX0BE = 'OU0' +SUBSUFFIX1 = 'ou=OU1,o=ticket47824.org' +SUBSUFFIX1BE = 'OU1' +SUBSUFFIX2 = 'ou=OU2,o=ticket47824.org' +SUBSUFFIX2BE = 'OU2' + +_MYLDIF = 'ticket47824.ldif' +_SUBLDIF0TMP = 'ticket47824_0.tmp' +_SUBLDIF0 = 'ticket47824_0.ldif' +_SUBLDIF1TMP = 'ticket47824_1.tmp' +_SUBLDIF1 = 'ticket47824_1.ldif' +_SUBLDIF2TMP = 'ticket47824_2.tmp' +_SUBLDIF2 = 'ticket47824_2.ldif' + +SEARCHFILTER = '(objectclass=*)' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47824_run(topology): + """ + Add 3 sub suffixes under the primary suffix + Import 16 entries each + Search with Simple Paged Results Control from the primary suffix (pagesize = 4) + If all of them are returned, the bug is verified + """ + log.info('Testing Ticket 47824 - paged results control is not working in some cases when we have a subsuffix') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47824.org ######################\n") + + topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) + topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) + + topology.standalone.log.info("\n\n######################### SETUP SUB SUFFIX ou=OU0 ######################\n") + + topology.standalone.backend.create(SUBSUFFIX0, {BACKEND_NAME: SUBSUFFIX0BE}) + topology.standalone.mappingtree.create(SUBSUFFIX0, bename=SUBSUFFIX0BE, parent=MYSUFFIX) + + topology.standalone.log.info("\n\n######################### SETUP SUB SUFFIX ou=OU1 ######################\n") + + topology.standalone.backend.create(SUBSUFFIX1, {BACKEND_NAME: SUBSUFFIX1BE}) + topology.standalone.mappingtree.create(SUBSUFFIX1, bename=SUBSUFFIX1BE, parent=MYSUFFIX) + + topology.standalone.log.info("\n\n######################### SETUP SUB SUFFIX ou=OU2 ######################\n") + + topology.standalone.backend.create(SUBSUFFIX2, {BACKEND_NAME: SUBSUFFIX2BE}) + topology.standalone.mappingtree.create(SUBSUFFIX2, bename=SUBSUFFIX2BE, parent=MYSUFFIX) + + topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") + + # get tmp dir + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + if mytmp is None: + mytmp = "/tmp" + + MYLDIF = '%s%s' % (mytmp, _MYLDIF) + SUBLDIF0TMP = '%s%s' % (mytmp, _SUBLDIF0TMP) + SUBLDIF0 = '%s%s' % (mytmp, _SUBLDIF0) + SUBLDIF1TMP = '%s%s' % (mytmp, _SUBLDIF1TMP) + SUBLDIF1 = '%s%s' % (mytmp, _SUBLDIF1) + SUBLDIF2TMP = '%s%s' % (mytmp, _SUBLDIF2TMP) + SUBLDIF2 = '%s%s' % (mytmp, _SUBLDIF2) + + os.system('ls %s' % MYLDIF) + os.system('ls %s' % SUBLDIF0TMP) + os.system('ls %s' % SUBLDIF1TMP) + os.system('ls %s' % SUBLDIF2TMP) + os.system('rm -f %s' % MYLDIF) + os.system('rm -f %s' % SUBLDIF0TMP) + os.system('rm -f %s' % SUBLDIF1TMP) + os.system('rm -f %s' % SUBLDIF2TMP) + if hasattr(topology.standalone, 'prefix'): + prefix = topology.standalone.prefix + else: + prefix = None + dbgen_prog = prefix + '/bin/dbgen.pl' + topology.standalone.log.info('dbgen: %s' % dbgen_prog) + 
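+    # dbgen.pl (shipped under <prefix>/bin with 389-ds) generates sample LDIF for a
+    # given suffix; the sed filtering applied to the sub-suffix files below is
+    # presumably there so each generated file imports cleanly into its own backend.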
os.system('%s -s %s -o %s -n 10' % (dbgen_prog, MYSUFFIX, MYLDIF)) + os.system('%s -s %s -o %s -n 10' % (dbgen_prog, SUBSUFFIX0, SUBLDIF0TMP)) + os.system('%s -s %s -o %s -n 10' % (dbgen_prog, SUBSUFFIX1, SUBLDIF1TMP)) + os.system('%s -s %s -o %s -n 10' % (dbgen_prog, SUBSUFFIX2, SUBLDIF2TMP)) + + os.system('cat %s | sed -e "s/\/objectClass: organizationalUnit/" | sed -e "/^o:.*/d" > %s' % (SUBLDIF0TMP, SUBLDIF0)) + os.system('cat %s | sed -e "s/\/objectClass: organizationalUnit/" | sed -e "/^o:.*/d" > %s' % (SUBLDIF1TMP, SUBLDIF1)) + os.system('cat %s | sed -e "s/\/objectClass: organizationalUnit/" | sed -e "/^o:.*/d" > %s' % (SUBLDIF2TMP, SUBLDIF2)) + + cmdline = 'egrep dn: %s %s %s %s | wc -l' % (MYLDIF, SUBLDIF0, SUBLDIF1, SUBLDIF2) + p = os.popen(cmdline, "r") + dnnumstr = p.readline() + dnnum = int(dnnumstr) + topology.standalone.log.info("We have %d entries.\n", dnnum) + + topology.standalone.log.info("\n\n######################### Import Test data ######################\n") + + args = {TASK_WAIT: True} + importTask = Tasks(topology.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) + importTask.importLDIF(SUBSUFFIX0, SUBSUFFIX0BE, SUBLDIF0, args) + importTask.importLDIF(SUBSUFFIX1, SUBSUFFIX1BE, SUBLDIF1, args) + importTask.importLDIF(SUBSUFFIX2, SUBSUFFIX2BE, SUBLDIF2, args) + + topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology.standalone.log.info("Returned %d entries.\n", len(entries)) + + #print entries + + assert dnnum == len(entries) + + topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) + + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL ######################\n") + + page_size = 4 + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER, None, serverctrls=[req_ctrl]) + + pageddncnt = 0 + pages = 0 + while True: + pages += 1 + + topology.standalone.log.info("Getting page %d" % pages) + rtype, rdata, rmsgid, serverctrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + topology.standalone.log.info("%d results" % len(rdata)) + pageddncnt += len(rdata) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + + pctrls = [ + c for c in serverctrls if c.controlType == SimplePagedResultsControl.controlType + ] + if not pctrls: + topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') + break + + if pctrls[0].cookie: + req_ctrl.cookie = pctrls[0].cookie + topology.standalone.log.info("cookie: %s" % req_ctrl.cookie) + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + None, + serverctrls=[req_ctrl]) + else: + topology.standalone.log.info("No cookie") + break + + topology.standalone.log.info("Paged result search returned %d entries.\n", pageddncnt) + + assert dnnum == len(entries) + topology.standalone.log.info("ticket47824 was successfully verified.") + + +def test_ticket47824_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47824_run(topo) + + test_ticket47824_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47828_test.py b/dirsrvtests/tests/tickets/ticket47828_test.py new file mode 100644 index 0000000..3962a0a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47828_test.py @@ -0,0 +1,728 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import socket +import pytest +import shutil +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_DN = 'cn=Account Inactivation Pplicy,%s' % SUFFIX +INACTIVITY_LIMIT = '9' +SEARCHFILTER = '(objectclass=*)' + +DUMMY_CONTAINER = 'cn=dummy container,%s' % SUFFIX +PROVISIONING = 'cn=provisioning,%s' % SUFFIX +ACTIVE_USER1_CN = 'active user1' +ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX) +STAGED_USER1_CN = 'staged user1' +STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING) +DUMMY_USER1_CN = 'dummy user1' +DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER) + +ALLOCATED_ATTR = 'employeeNumber' + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + At the beginning, It may exists a standalone instance. + It may also exists a backup for the standalone instance. + + Principle: + If standalone instance exists: + restart it + If backup of standalone exists: + create/rebind to standalone + + restore standalone instance from backup + else: + Cleanup everything + remove instance + remove backup + Create instance + Create backup + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the backups + backup_standalone = standalone.checkBackupFS() + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + if instance_standalone: + # assuming the instance is already stopped, just wait 5 sec max + standalone.stop(timeout=5) + try: + standalone.start(timeout=10) + except ldap.SERVER_DOWN: + pass + + if backup_standalone: + # The backup exist, assuming it is correct + # we just re-init the instance with it + if not instance_standalone: + standalone.create() + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # restore standalone instance from backup + standalone.stop(timeout=10) + standalone.restoreFS(backup_standalone) + standalone.start(timeout=10) + + else: + # We should be here only in two conditions + # - This is the first time a test involve standalone instance + # - Something weird happened (instance/backup destroyed) + # so we discard everything and recreate all + + # Remove the backup. So even if we have a specific backup file + # (e.g backup_standalone) we clear backup that an instance may have created + if backup_standalone: + standalone.clearBackupFS() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # Time to create the backups + standalone.stop(timeout=10) + standalone.backupfile = standalone.backupFS() + standalone.start(timeout=10) + + # + # Here we have standalone instance up and running + # Either coming from a backup recovery + # or from a fresh (re)init + # Time to return the topology + return TopologyStandalone(standalone) + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + +def test_ticket47828_init(topology): + """ + Enable DNA + """ + topology.standalone.plugins.enable(name=PLUGIN_DNA) + + topology.standalone.add_s(Entry((PROVISIONING,{'objectclass': "top nscontainer".split(), + 'cn': 'provisioning'}))) + topology.standalone.add_s(Entry((DUMMY_CONTAINER,{'objectclass': "top nscontainer".split(), + 'cn': 'dummy container'}))) + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + topology.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(), + 'cn': 'excluded scope', + 'dnaType': ALLOCATED_ATTR, + 'dnaNextValue': str(1000), + 'dnaMaxValue': str(2000), + 'dnaMagicRegen': str(-1), + 'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))', + 'dnaScope': SUFFIX}))) + topology.standalone.restart(timeout=10) + + + +def test_ticket47828_run_0(topology): + """ + NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_1(topology): + """ + NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_2(topology): + """ + NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = 
topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_3(topology): + """ + NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_4(topology): + ''' + Exclude the provisioning container + ''' + _header(topology, 'Exclude the provisioning container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)] + topology.standalone.modify_s(dn_config, mod) + +def test_ticket47828_run_5(topology): + """ + Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_6(topology): + """ + Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_7(topology): + """ + Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = 
topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(-1) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_8(topology): + """ + Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_9(topology): + """ + Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_run_10(topology): + """ + Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_run_11(topology): + ''' + Exclude (in addition) the dummy container + ''' + _header(topology, 'Exclude (in addition) the dummy container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_ADD, 'dnaExcludeScope', DUMMY_CONTAINER)] + topology.standalone.modify_s(dn_config, mod) + +def test_ticket47828_run_12(topology): + """ + Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: 
str(-1)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_13(topology): + """ + Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_14(topology): + """ + Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(-1) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_15(topology): + """ + Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_16(topology): + """ + Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set + """ + _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(-1) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, 
ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_run_17(topology): + """ + Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_18(topology): + ''' + Exclude PROVISIONING and a wrong container + ''' + _header(topology, 'Exclude PROVISIONING and a wrong container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)] + topology.standalone.modify_s(dn_config, mod) + try: + mod = [(ldap.MOD_ADD, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)] + topology.standalone.modify_s(dn_config, mod) + raise ValueError("invalid dnaExcludeScope value (not a DN)") + except ldap.INVALID_SYNTAX: + pass + +def test_ticket47828_run_19(topology): + """ + Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_20(topology): + """ + Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_21(topology): + """ + Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson 
inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(-1) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_22(topology): + """ + Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_23(topology): + """ + Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_run_24(topology): + """ + Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_run_25(topology): + ''' + Exclude a wrong container + ''' + _header(topology, 'Exclude a wrong container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + + try: + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)] + topology.standalone.modify_s(dn_config, mod) + raise ValueError("invalid dnaExcludeScope value (not a DN)") + except ldap.INVALID_SYNTAX: + pass + +def test_ticket47828_run_26(topology): + """ + Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 
'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_27(topology): + """ + Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(ACTIVE_USER1_DN) + +def test_ticket47828_run_28(topology): + """ + Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(-1) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_29(topology): + """ + Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(STAGED_USER1_DN) + +def test_ticket47828_run_30(topology): + """ + Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent 
= topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) != str(-1) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_run_31(topology): + """ + Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ent.getValue(ALLOCATED_ATTR) == str(20) + topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) + topology.standalone.delete_s(DUMMY_USER1_DN) + +def test_ticket47828_final(topology): + topology.standalone.plugins.disable(name=PLUGIN_DNA) + topology.standalone.stop(timeout=10) + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47828_init(topo) + + test_ticket47828_run_0(topo) + test_ticket47828_run_1(topo) + test_ticket47828_run_2(topo) + test_ticket47828_run_3(topo) + test_ticket47828_run_4(topo) + test_ticket47828_run_5(topo) + test_ticket47828_run_6(topo) + test_ticket47828_run_7(topo) + test_ticket47828_run_8(topo) + test_ticket47828_run_9(topo) + test_ticket47828_run_10(topo) + test_ticket47828_run_11(topo) + test_ticket47828_run_12(topo) + test_ticket47828_run_13(topo) + test_ticket47828_run_14(topo) + test_ticket47828_run_15(topo) + test_ticket47828_run_16(topo) + test_ticket47828_run_17(topo) + test_ticket47828_run_18(topo) + test_ticket47828_run_19(topo) + test_ticket47828_run_20(topo) + test_ticket47828_run_21(topo) + test_ticket47828_run_22(topo) + test_ticket47828_run_23(topo) + test_ticket47828_run_24(topo) + test_ticket47828_run_25(topo) + test_ticket47828_run_26(topo) + test_ticket47828_run_27(topo) + test_ticket47828_run_28(topo) + test_ticket47828_run_29(topo) + test_ticket47828_run_30(topo) + test_ticket47828_run_31(topo) + + test_ticket47828_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47829_test.py b/dirsrvtests/tests/tickets/ticket47829_test.py new file mode 100644 index 0000000..2acebf9 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47829_test.py @@ -0,0 +1,656 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + + +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + +INDIRECT_ACTIVE_GROUP_CN = "indirect active group" +INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + + +def _add_user(topology, type='active'): + if type == 'active': + topology.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN}))) + elif type == 'stage': + topology.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + + +def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True): + assert(topology) + assert(user_dn) + assert(group_dn) + ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if val == group_dn: + found = True + break + + if find_result: + assert(found) + else: + assert(not found) + + +def _find_member(topology, user_dn=None, group_dn=None, find_result=True): + assert(topology) + assert(user_dn) + assert(group_dn) + ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) + found = False + if ent.hasAttr('member'): + + for val in ent.getValues('member'): + topology.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val)) + if val == user_dn: + found = True + break + + if find_result: + assert(found) + else: + assert(not found) + + +def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology is not None + assert entry_dn is not None + assert new_rdn is not None + + topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + try: + if new_superior: + topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + except ldap.NO_SUCH_ATTRIBUTE: + topology.standalone.log.info("accepted failure due to 47833: modrdn reports error.. 
but succeeds") + attempt = 0 + if new_superior: + dn = "%s,%s" % (new_rdn, new_superior) + base = new_superior + else: + base = ','.join(entry_dn.split(",")[1:]) + dn = "%s, %s" % (new_rdn, base) + myfilter = entry_dn.split(',')[0] + + while attempt < 10: + try: + ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + break + except ldap.NO_SUCH_OBJECT: + topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") + attempt += 1 + time.sleep(1) + if attempt == 10: + ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + + +def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None): + assert(topology) + assert(user_dn) + assert(group_dn) + if action == ldap.MOD_ADD: + txt = 'add' + elif action == ldap.MOD_DELETE: + txt = 'delete' + else: + txt = 'replace' + topology.standalone.log.info('\n%s entry %s' % (txt, user_dn)) + topology.standalone.log.info('to group %s' % group_dn) + + topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)]) + time.sleep(1) + _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result) + + +def test_ticket47829_init(topology): + topology.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology.standalone.add_s(Entry((SCOPE_OUT_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_OUT_DN}))) + topology.standalone.add_s(Entry((PROVISIONING_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology.standalone.add_s(Entry((STAGE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': STAGE_DN}))) + topology.standalone.add_s(Entry((DELETE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + + # add groups + topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': ACTIVE_GROUP_CN}))) + topology.standalone.add_s(Entry((STAGE_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': STAGE_GROUP_CN}))) + topology.standalone.add_s(Entry((OUT_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': OUT_GROUP_CN}))) + topology.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': INDIRECT_ACTIVE_GROUP_CN}))) + + # add users + _add_user(topology, 'active') + _add_user(topology, 'stage') + _add_user(topology, 'out') + + # enable memberof of with scope IN except provisioning + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) + topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)]) + topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)]) + + # enable RI with scope IN except provisioning + topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN) + topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)]) + topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)]) + topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)]) + + topology.standalone.restart(timeout=10) + + 
+def test_ticket47829_mod_active_user_1(topology): + _header(topology, 'MOD: add an active user to an active group') + + # add active user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user to active group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_active_user_2(topology): + _header(topology, 'MOD: add an Active user to a Stage group') + + # add active user to stage group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove active user to stage group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_active_user_3(topology): + _header(topology, 'MOD: add an Active user to a out of scope group') + + # add active user to out of scope group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove active user to out of scope group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_stage_user_1(topology): + _header(topology, 'MOD: add an Stage user to a Active group') + + # add stage user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove stage user to active group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_stage_user_2(topology): + _header(topology, 'MOD: add an Stage user to a Stage group') + + # add stage user to stage group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove stage user to stage group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_stage_user_3(topology): + _header(topology, 'MOD: add an Stage user to a out of scope group') + + # add stage user to an out of scope group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove stage user to out of scope group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_out_user_1(topology): + _header(topology, 'MOD: add an out of scope user to an active group') + + # add out of scope user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # 
remove out of scope user to active group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_out_user_2(topology): + _header(topology, 'MOD: add an out of scope user to a Stage group') + + # add out of scope user to stage group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove out of scope user to stage group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + +def test_ticket47829_mod_out_user_3(topology): + _header(topology, 'MOD: add an out of scope user to an out of scope group') + + # add out of scope user to stage group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove out of scope user to stage group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_active_user_1(topology): + _header(topology, 'add an Active user to a Active group. Then move Active user to Active') + + # add Active user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to active, expect 'member' and 'memberof' + _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to active, expect 'member' and no 'memberof' + _modrdn_entry(topology, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user to active group + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology): + _header(topology, 'add an Active user to a Active group. 
Then move Active user to Stage')
+
+    # add Active user to active group
+    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+
+    # move the Active entry to Stage, expect no 'member' and no 'memberof'
+    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
+    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+
+    # move the entry back to Active, expect no 'member' and no 'memberof'
+    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
+    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+
+
+def test_ticket47829_mod_active_user_modrdn_out_user_1(topology):
+    _header(topology, 'add an Active user to an Active group. Then move Active user to out of scope')
+
+    # add Active user to active group
+    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+
+    # move the Active entry to out of scope, expect no 'member' and no 'memberof'
+    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN)
+    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+
+    # move the entry back to Active, expect no 'member' and no 'memberof'
+    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
+    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+
+
+def test_ticket47829_mod_modrdn_1(topology):
+    _header(topology, 'add a Stage user to an Active group. 
Then move Stage user to Active')
+
+    # add Stage user to active group
+    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+
+    # move the Stage entry to Active, expect 'member' and 'memberof'
+    _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
+    _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+
+    # move the Active entry to Stage, expect no 'member' and no 'memberof'
+    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
+    _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+
+
+def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology):
+    _header(topology, 'add a Stage user to an Active group. Then move Stage user to Active')
+
+    stage_user_dn = STAGE_USER_DN
+    stage_user_rdn = "cn=%s" % STAGE_USER_CN
+    active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN)
+
+    # add Stage user to active group
+    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+
+    # move the Stage entry to Active, expect 'member' and 'memberof'
+    _modrdn_entry(topology, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN)
+    _find_memberof(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+
+    # move the Active entry to Stage, expect no 'member' and no 'memberof'
+    _modrdn_entry(topology, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN)
+    _find_memberof(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+
+
+def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
+    _header(topology, 'add a Stage user to an Active group. 
Then move Stage user to Stage') + + _header(topology, 'Return because it requires a fix for 47833') + return + + old_stage_user_dn = STAGE_USER_DN + old_stage_user_rdn = "cn=%s" % STAGE_USER_CN + new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN + new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) + + # add Stage user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Stage, expect no 'member' and 'memberof' + _modrdn_entry(topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + # move the Stage entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_indirect_active_group_1(topology): + _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1') + + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) + + # add an active user to G1. Checks that user is memberof G1 + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user from G1 + _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_indirect_active_group_2(topology): + _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage') + + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) + + # add an active user to G1. 
Checks that user is memberof G1 + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move active user to stage + _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) + + # stage user is no long member of active group and indirect active group + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + # active group and indirect active group do no longer have stage user as member + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + # return back the entry to active. It remains not member + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_indirect_active_group_3(topology): + _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope') + + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) + + # add an active user to G1. 
Checks that user is memberof G1 + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move active user to out of the scope + _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN) + + # stage user is no long member of active group and indirect active group + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + # active group and indirect active group do no longer have stage user as member + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + # return back the entry to active. It remains not member + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_indirect_active_group_4(topology): + _header(topology, 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. 
Then move it back') + + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) + + # add stage user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + # move stage user to active + _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) + renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) + _find_member(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move back active to stage + _modrdn_entry(topology, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47829_init(topo) + + test_ticket47829_mod_active_user_1(topo) + test_ticket47829_mod_active_user_2(topo) + test_ticket47829_mod_active_user_3(topo) + test_ticket47829_mod_stage_user_1(topo) + test_ticket47829_mod_stage_user_2(topo) + test_ticket47829_mod_stage_user_3(topo) + test_ticket47829_mod_out_user_1(topo) + test_ticket47829_mod_out_user_2(topo) + test_ticket47829_mod_out_user_3(topo) + + test_ticket47829_mod_active_user_modrdn_active_user_1(topo) + test_ticket47829_mod_active_user_modrdn_stage_user_1(topo) + test_ticket47829_mod_active_user_modrdn_out_user_1(topo) + + test_ticket47829_mod_stage_user_modrdn_active_user_1(topo) + test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) + + test_ticket47829_indirect_active_group_1(topo) + test_ticket47829_indirect_active_group_2(topo) + test_ticket47829_indirect_active_group_3(topo) + test_ticket47829_indirect_active_group_4(topo) + + test_ticket47829_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47833_test.py b/dirsrvtests/tests/tickets/ticket47833_test.py new file mode 100644 index 0000000..f1fb634 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47833_test.py @@ -0,0 +1,274 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
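+    # Note (descriptive, not part of the original test logic): the usual lib389
+    # flow used below -- allocate() only records the instance properties,
+    # exists() detects a leftover instance from a previous run (which is then
+    # delete()d), and create()/open() build and bind to a fresh standalone
+    # server before the tests start.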
+ standalone = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + +def _add_user(topology, type='active'): + if type == 'active': + topology.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN}))) + elif type == 'stage': + topology.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + +def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True): + assert(topology) + assert(user_dn) + assert(group_dn) + ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if val == group_dn: + found = True + break + + if find_result: + assert(found) + else: + assert(not found) + +def _find_member(topology, user_dn=None, group_dn=None, find_result=True): + assert(topology) + assert(user_dn) + assert(group_dn) + ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) + found = False + if ent.hasAttr('member'): + + for val in ent.getValues('member'): + topology.standalone.log.info("!!!!!!! 
%s: member ->%s" % (group_dn, val)) + if val == user_dn: + found = True + break + + if find_result: + assert(found) + else: + assert(not found) + +def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology != None + assert entry_dn != None + assert new_rdn != None + + + topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + if new_superior: + topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + +def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None): + assert(topology) + assert(user_dn) + assert(group_dn) + if action == ldap.MOD_ADD: + txt = 'add' + elif action == ldap.MOD_DELETE: + txt = 'delete' + else: + txt = 'replace' + topology.standalone.log.info('\n%s entry %s' % (txt, user_dn)) + topology.standalone.log.info('to group %s' % group_dn) + + topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)]) + time.sleep(1) + _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result) + + + + +def test_ticket47829_init(topology): + topology.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology.standalone.add_s(Entry((SCOPE_OUT_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_OUT_DN}))) + topology.standalone.add_s(Entry((PROVISIONING_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology.standalone.add_s(Entry((STAGE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': STAGE_DN}))) + topology.standalone.add_s(Entry((DELETE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + + # add groups + topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': ACTIVE_GROUP_CN}))) + topology.standalone.add_s(Entry((STAGE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': STAGE_GROUP_CN}))) + topology.standalone.add_s(Entry((OUT_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': OUT_GROUP_CN}))) + + # add users + _add_user(topology, 'active') + _add_user(topology, 'stage') + _add_user(topology, 'out') + + + + # enable memberof of with scope account + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) + topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ACTIVE_DN)]) + + + + topology.standalone.restart(timeout=10) + + + + +def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology): + _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Stage') + + old_stage_user_dn = STAGE_USER_DN + old_stage_user_rdn = "cn=%s" % STAGE_USER_CN + new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN + new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) + + # add Stage user to active group + _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member (topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Stage, expect no 'member' and 'memberof' + _modrdn_entry (topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member (topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47833_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47829_init(topo) + test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) + test_ticket47833_final(topo) + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47838_test.py b/dirsrvtests/tests/tickets/ticket47838_test.py new file mode 100644 index 0000000..42d25fd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47838_test.py @@ -0,0 +1,841 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import shutil +from lib389 import DirSrv, Entry, tools +from lib389 import DirSrvTools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +CONFIG_DN = 'cn=config' +ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +RSA = 'RSA' +RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) +LDAPSPORT = '10636' +SERVERCERT = 'Server-Cert' +plus_all_ecount = 0 +plus_all_dcount = 0 +plus_all_ecount_noweak = 0 +plus_all_dcount_noweak = 0 + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
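+    # Note (descriptive): besides retrieving configuration information, open()
+    # binds to the new instance as the root DN; the paths carried by this
+    # DirSrv object (confdir, errlog) are what the certutil commands and the
+    # error-log grep checks below rely on.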
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + + +def test_ticket47838_init(topology): + """ + Generate self signed cert and import it to the DS cert db. + Enable SSL + """ + _header(topology, 'Testing Ticket 47838 - harden the list of ciphers available by default') + + conf_dir = topology.standalone.confdir + + log.info("\n######################### Checking existing certs ######################\n") + os.system('certutil -L -d %s -n "CA certificate"' % conf_dir) + os.system('certutil -L -d %s -n "%s"' % (conf_dir, SERVERCERT)) + + log.info("\n######################### Create a password file ######################\n") + pwdfile = '%s/pwdfile.txt' % (conf_dir) + opasswd = os.popen("(ps -ef ; w ) | sha1sum | awk '{print $1}'", "r") + passwd = opasswd.readline() + pwdfd = open(pwdfile, "w") + pwdfd.write(passwd) + pwdfd.close() + + log.info("\n######################### Create a noise file ######################\n") + noisefile = '%s/noise.txt' % (conf_dir) + noise = os.popen("(w ; ps -ef ; date ) | sha1sum | awk '{print $1}'", "r") + noisewdfd = open(noisefile, "w") + noisewdfd.write(noise.readline()) + noisewdfd.close() + + log.info("\n######################### Create key3.db and cert8.db database ######################\n") + os.system("ls %s" % pwdfile) + os.system("cat %s" % pwdfile) + os.system('certutil -N -d %s -f %s' % (conf_dir, pwdfile)) + + log.info("\n######################### Creating encryption key for CA ######################\n") + os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile)) + + log.info("\n######################### Creating self-signed CA certificate ######################\n") + os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile)) + + log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n") + cafile = '%s/cacert.asc' % conf_dir + catxt = os.popen('certutil -L -d %s -n "CA certificate" -a' % conf_dir) + cafd = open(cafile, "w") + while True: + line = catxt.readline() + if (line == ''): + break + cafd.write(line) + cafd.close() + + log.info("\n######################### Generate the server certificate ######################\n") + ohostname = os.popen('hostname --fqdn', "r") + myhostname = ohostname.readline() + os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile)) + + log.info("\n######################### create the pin file ######################\n") + pinfile = '%s/pin.txt' % (conf_dir) + pintxt = 'Internal (Software) Token:%s' % passwd + pinfd = open(pinfile, "w") + pinfd.write(pintxt) + pinfd.close() + + log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), + (ldap.MOD_REPLACE, 
'nsTLS1', 'on'), + (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'), + (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'), + (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')]) + + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'), + (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'), + (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)]) + + topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), + 'cn': RSA, + 'nsSSLPersonalitySSL': SERVERCERT, + 'nsSSLToken': 'internal (software)', + 'nsSSLActivation': 'on'}))) + + +def comp_nsSSLEnableCipherCount(topology, ecount): + """ + Check nsSSLEnabledCipher count with ecount + """ + log.info("Checking nsSSLEnabledCiphers...") + msgid = topology.standalone.search_ext(ENCRYPTION_DN, ldap.SCOPE_BASE, 'cn=*', ['nsSSLEnabledCiphers']) + enabledciphercnt = 0 + rtype, rdata, rmsgid = topology.standalone.result2(msgid) + topology.standalone.log.info("%d results" % len(rdata)) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + if 'nsSSLEnabledCiphers' in attrs: + enabledciphercnt = len(attrs['nsSSLEnabledCiphers']) + topology.standalone.log.info("enabledCipherCount: %d" % enabledciphercnt) + assert ecount == enabledciphercnt + + +def test_ticket47838_run_0(topology): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: allowWeakCipher: on + """ + _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.restart(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + assert ecount >= 60 + assert dcount <= 7 + global plus_all_ecount + global plus_all_dcount + plus_all_ecount = ecount + plus_all_dcount = dcount + weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers: %d" % wcount) + assert wcount <= 29 + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_1(topology): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: default allowWeakCipher (i.e., off) for +all + """ + _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) + # Make sure allowWeakCipher is not set. 
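+    # In python-ldap, a MOD_DELETE with a value of None removes every value of
+    # the attribute, so this resets allowWeakCipher to its server-side default.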
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) + + log.info("\n######################### Restarting the server ######################\n") + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_0' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + global plus_all_ecount_noweak + global plus_all_dcount_noweak + plus_all_ecount_noweak = ecount + plus_all_dcount_noweak = dcount + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + assert ecount >= 31 + assert dcount <= 36 + weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers: %d" % wcount) + assert wcount <= 29 + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_2(topology): + """ + Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha + rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_1' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount + global plus_all_dcount + assert ecount == 2 + assert dcount == (plus_all_ecount + plus_all_dcount - ecount) + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_3(topology): + """ + Check nsSSL3Ciphers: -all + All ciphers are disabled. 
+ default allowWeakCipher + """ + _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_2' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + global plus_all_ecount + assert ecount == 0 + + disabledmsg = os.popen('egrep "Disabling SSL" %s' % topology.standalone.errlog) + log.info("Disabling SSL message?: %s" % disabledmsg.readline()) + assert disabledmsg != '' + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_4(topology): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_3' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount + global plus_all_dcount + assert ecount == 12 + assert dcount == (plus_all_ecount + plus_all_dcount - ecount) + weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers in the default setting: %d" % wcount) + assert wcount == 0 + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_5(topology): + """ + Check nsSSL3Ciphers: default + Default ciphers are enabled. 
+ default allowWeakCipher + """ + _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_4' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount + global plus_all_dcount + assert ecount == 12 + assert dcount == (plus_all_ecount + plus_all_dcount - ecount) + weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers in the default setting: %d" % wcount) + assert wcount == 0 + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_6(topology): + """ + Check nsSSL3Ciphers: +all,-rsa_rc4_128_md5 + All ciphers are disabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-tls_dhe_rsa_aes_128_gcm_sha with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-tls_dhe_rsa_aes_128_gcm_sha')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_5' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount_noweak + global plus_all_dcount_noweak + log.info("ALL Ecount: %d" % plus_all_ecount_noweak) + log.info("ALL Dcount: %d" % plus_all_dcount_noweak) + assert ecount == (plus_all_ecount_noweak - 1) + assert dcount == (plus_all_dcount_noweak + 1) + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_7(topology): + """ + Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 + All ciphers are disabled. 
+ default allowWeakCipher + """ + _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_6' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount + global plus_all_dcount + assert ecount == 1 + assert dcount == (plus_all_ecount + plus_all_dcount - ecount) + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_8(topology): + """ + Check nsSSL3Ciphers: default + allowWeakCipher: off + Strong Default ciphers are enabled. + """ + _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'), + (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_7' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount + global plus_all_dcount + assert ecount == 12 + assert dcount == (plus_all_ecount + plus_all_dcount - ecount) + weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers in the default setting: %d" % wcount) + assert wcount == 0 + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_9(topology): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. 
+ allowWeakCipher: on + nsslapd-errorlog-level: 0 + """ + _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), + (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')]) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_8' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + assert ecount == 23 + assert dcount == 0 + weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers in the default setting: %d" % wcount) + assert wcount == 11 + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_10(topology): + """ + Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5, + +TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + +TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA, + +TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA, + +TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA, + -SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5, + -SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5, + -SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5 + allowWeakCipher: on + nsslapd-errorlog-level: 0 + """ + _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', + '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_9' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) + disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) + ecount = int(enabled.readline().rstrip()) + dcount = 
int(disabled.readline().rstrip()) + + log.info("Enabled ciphers: %d" % ecount) + log.info("Disabled ciphers: %d" % dcount) + global plus_all_ecount + global plus_all_dcount + assert ecount == 9 + assert dcount == 0 + weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) + wcount = int(weak.readline().rstrip()) + log.info("Weak ciphers in the default setting: %d" % wcount) + + topology.standalone.log.info("ticket47838 was successfully verified.") + + comp_nsSSLEnableCipherCount(topology, ecount) + + +def test_ticket47838_run_11(topology): + """ + Check nsSSL3Ciphers: +fortezza + SSL_GetImplementedCiphers does not return this as a secuire cipher suite + """ + _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_10' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "is not available in NSS"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected error message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected error message was not found") + assert False + + comp_nsSSLEnableCipherCount(topology, 0) + + +def test_ticket47928_run_0(topology): + """ + No SSL version config parameters. + Check SSL3 (TLS1.0) is off. + """ + _header(topology, 'Test Case 13 - No SSL version config parameters') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + # add them once and remove them + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), + (ldap.MOD_REPLACE, 'nsTLS1', 'on'), + (ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'), + (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2')]) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3', None), + (ldap.MOD_DELETE, 'nsTLS1', None), + (ldap.MOD_DELETE, 'sslVersionMin', None), + (ldap.MOD_DELETE, 'sslVersionMax', None)]) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_11' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "Default SSL Version settings; Configuring the version range as min: TLS1.1"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + +def test_ticket47928_run_1(topology): + """ + No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax + Check sslVersionMax is ignored. 
+ """ + _header(topology, 'Test Case 14 - No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.2'), + (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.1')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_12' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "The min value of NSS version range"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.2, max: TLS1"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + +def test_ticket47928_run_2(topology): + """ + nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2 + Conflict between nsSSL3 and range; nsSSL3 is disabled + """ + _header(topology, 'Test Case 15 - nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'), + (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'), + (ldap.MOD_REPLACE, 'nsSSL3', 'on')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_13' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "Respect the supported range."' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + +def test_ticket47928_run_3(topology): + """ + nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2 + Conflict between nsSSL3/nsTLS1 and range; nsSSL3 is disabled; nsTLS1 is enabled. 
+ """ + _header(topology, 'Test Case 16 - nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'), + (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'), + (ldap.MOD_REPLACE, 'nsSSL3', 'on'), + (ldap.MOD_REPLACE, 'nsTLS1', 'off')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_14' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "Respect the configured range."' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected message was not found") + assert False + + +def test_ticket47838_run_last(topology): + """ + Check nsSSL3Ciphers: all <== invalid value + All ciphers are disabled. + """ + _header(topology, 'Test Case 17 - Check nsSSL3Ciphers: all, which is invalid') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.47838_15' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + errmsg = os.popen('egrep "SSL alert:" %s | egrep "invalid ciphers"' % topology.standalone.errlog) + if errmsg != "": + log.info("Expected error message:") + log.info("%s" % errmsg.readline()) + else: + log.info("Expected error message was not found") + assert False + + comp_nsSSLEnableCipherCount(topology, 0) + + topology.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.") + + +def test_ticket47838_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47838_init(topo) + + test_ticket47838_run_0(topo) + test_ticket47838_run_1(topo) + test_ticket47838_run_2(topo) + test_ticket47838_run_3(topo) + test_ticket47838_run_4(topo) + test_ticket47838_run_5(topo) + test_ticket47838_run_6(topo) + test_ticket47838_run_7(topo) + test_ticket47838_run_8(topo) + test_ticket47838_run_9(topo) + test_ticket47838_run_10(topo) + test_ticket47838_run_11(topo) + test_ticket47928_run_0(topo) + test_ticket47928_run_1(topo) + test_ticket47928_run_2(topo) + test_ticket47928_run_3(topo) + + test_ticket47838_run_last(topo) + + test_ticket47838_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47869MMR_test.py b/dirsrvtests/tests/tickets/ticket47869MMR_test.py new file mode 100644 index 0000000..630cb93 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47869MMR_test.py @@ -0,0 +1,346 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_NAME = 'test_entry' +MAX_ENTRIES = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + + +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. 
+ ''' + global installation1_prefix + global installation2_prefix + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master1.clearTmpDir(__file__) + + # Here we have two instances master and consumer + return TopologyMaster1Master2(master1, master2) + + +def test_ticket47869_init(topology): + """ + It adds an entry ('bind_entry') and 10 test entries + It sets the anonymous aci + + """ + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + # entry used to bind with + topology.master1.log.info("Add %s" % BIND_DN) + topology.master1.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = 
topology.master2.getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + # keep anonymous ACI for use 'read-search' aci in SEARCH test + ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" + mod = [(ldap.MOD_REPLACE, 'aci', ACI_ANONYMOUS)] + topology.master1.modify_s(SUFFIX, mod) + topology.master2.modify_s(SUFFIX, mod) + + # add entries + for cpt in range(MAX_ENTRIES): + name = "%s%d" % (ENTRY_NAME, cpt) + mydn = "cn=%s,%s" % (name, SUFFIX) + topology.master1.add_s(Entry((mydn, + {'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology.master2.getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + +def test_ticket47869_check(topology): + ''' + On Master 1 and 2: + Bind as Directory Manager. + Search all specifying nscpEntryWsi in the attribute list. + Check nscpEntryWsi is returned. + On Master 1 and 2: + Bind as Bind Entry. + Search all specifying nscpEntryWsi in the attribute list. + Check nscpEntryWsi is not returned. + On Master 1 and 2: + Bind as anonymous. + Search all specifying nscpEntryWsi in the attribute list. + Check nscpEntryWsi is not returned. + ''' + topology.master1.log.info("\n\n######################### CHECK nscpentrywsi ######################\n") + + topology.master1.log.info("##### Master1: Bind as %s #####" % DN_DM) + topology.master1.simple_bind_s(DN_DM, PASSWORD) + + topology.master1.log.info("Master1: Calling search_ext...") + msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology.master1.result2(msgid) + topology.master1.log.info("%d results" % len(rdata)) + + topology.master1.log.info("Results:") + for dn, attrs in rdata: + topology.master1.log.info("dn: %s" % dn) + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + + topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) + + topology.master2.log.info("##### Master2: Bind as %s #####" % DN_DM) + topology.master2.simple_bind_s(DN_DM, PASSWORD) + + topology.master2.log.info("Master2: Calling search_ext...") + msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology.master2.result2(msgid) + topology.master2.log.info("%d results" % len(rdata)) + + topology.master2.log.info("Results:") + for dn, attrs in rdata: + topology.master2.log.info("dn: %s" % dn) + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + + topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as bind_entry + topology.master1.log.info("##### Master1: Bind as %s #####" % BIND_DN) + topology.master1.simple_bind_s(BIND_DN, BIND_PW) + + topology.master1.log.info("Master1: Calling search_ext...") + msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology.master1.result2(msgid) + topology.master1.log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) 
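+    # nscpentrywsi is an operational attribute exposing the entry with its
+    # internal state (CSNs); per this ticket only the root DN may read it,
+    # which is why the count must stay 0 for bind_entry and anonymous binds.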
+ + # bind as bind_entry + topology.master2.log.info("##### Master2: Bind as %s #####" % BIND_DN) + topology.master2.simple_bind_s(BIND_DN, BIND_PW) + + topology.master2.log.info("Master2: Calling search_ext...") + msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology.master2.result2(msgid) + topology.master2.log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as anonymous + topology.master1.log.info("##### Master1: Bind as anonymous #####") + topology.master1.simple_bind_s("", "") + + topology.master1.log.info("Master1: Calling search_ext...") + msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology.master1.result2(msgid) + topology.master1.log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as bind_entry + topology.master2.log.info("##### Master2: Bind as anonymous #####") + topology.master2.simple_bind_s("", "") + + topology.master2.log.info("Master2: Calling search_ext...") + msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology.master2.result2(msgid) + topology.master2.log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) + + topology.master1.log.info("##### ticket47869 was successfully verified. #####") + + +def test_ticket47869_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + test_ticket47869_init(topo) + + test_ticket47869_check(topo) + + test_ticket47869_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47871_test.py b/dirsrvtests/tests/tickets/ticket47871_test.py new file mode 100644 index 0000000..d6ea214 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47871_test.py @@ -0,0 +1,226 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] + + +class TopologyMasterConsumer(object): + def __init__(self, master, consumer): + master.open() + self.master = master + + consumer.open() + self.consumer = consumer + + def __repr__(self): + return "Master[%s] -> Consumer[%s" % (self.master, self.consumer) + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER -> Consumer. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + master = DirSrv(verbose=False) + consumer = DirSrv(verbose=False) + + # Args for the master instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master.allocate(args_master) + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_consumer = args_instance.copy() + consumer.allocate(args_consumer) + + # Get the status of the instance and restart it if it exists + instance_master = master.exists() + instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() + if instance_consumer: + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master.testReplication(DEFAULT_SUFFIX, consumer): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # clear the tmp directory + master.clearTmpDir(__file__) + + # + # Here we have two instances master and consumer + # with replication working. 
Either coming from a backup recovery + # or from a fresh (re)init + # Time to return the topology + return TopologyMasterConsumer(master, consumer) + + +def test_ticket47871_init(topology): + """ + Initialize the test environment + """ + topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', "10s"), # 10 second triming + (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', "5s")] + topology.master.modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod) + #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF) + #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology.master.stop(timeout=10) + topology.master.start(timeout=10) + + topology.master.log.info("test_ticket47871_init topology %r" % (topology)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology.master.errorlog_file = open(topology.master.errlog, "r") + + +def test_ticket47871_1(topology): + ''' + ADD entries and check they are all in the retrocl + ''' + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology.master.log.info("test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) + + # Check the number of entries in the retro changelog + time.sleep(1) + ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) == MAX_OTHERS + topology.master.log.info("Added entries are") + for ent in ents: + topology.master.log.info("%s" % ent.dn) + + +def test_ticket47871_2(topology): + ''' + Wait until there is just a last entries + ''' + MAX_TRIES = 10 + TRY_NO = 1 + while TRY_NO <= MAX_TRIES: + time.sleep(6) # at least 1 trimming occurred + ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) <= MAX_OTHERS + topology.master.log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents))) + for ent in ents: + topology.master.log.info("%s" % ent.dn) + if len(ents) > 1: + TRY_NO += 1 + else: + break + assert TRY_NO <= MAX_TRIES + assert len(ents) <= 1 + + +def test_ticket47871_final(topology): + topology.master.delete() + topology.consumer.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47871_init(topo) + test_ticket47871_1(topo) + test_ticket47871_2(topo) + + test_ticket47871_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47900_test.py b/dirsrvtests/tests/tickets/ticket47900_test.py new file mode 100644 index 0000000..c01b733 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47900_test.py @@ -0,0 +1,344 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +CONFIG_DN = 'cn=config' +ADMIN_NAME = 'passwd_admin' +ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) +ADMIN_PWD = 'adminPassword_1' +ENTRY_NAME = 'Joe Schmo' +ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) +INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47900(topology): + """ + Test that password administrators/root DN can + bypass password syntax/policy. + + We need to test how passwords are modified in + existing entries, and when adding new entries. + + Create the Password Admin entry, but do not set + it as an admin yet. Use the entry to verify invalid + passwords are caught. Then activate the password + admin and make sure it can bypass password policy. + """ + + # Prepare the Password Administator + entry = Entry(ADMIN_DN) + entry.setValues('objectclass', 'top', 'person') + entry.setValues('sn', ADMIN_NAME) + entry.setValues('cn', ADMIN_NAME) + entry.setValues('userpassword', ADMIN_PWD) + + topology.standalone.log.info("Creating Password Administator entry %s..." 
% ADMIN_DN)
+    try:
+        topology.standalone.add_s(entry)
+    except ldap.LDAPError as e:
+        topology.standalone.log.error("Failed to add Password Administrator %s, error: %s"
+                                      % (ADMIN_DN, e.message['desc']))
+        assert False
+
+    topology.standalone.log.info("Configuring password policy...")
+    try:
+        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
+                                                 (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
+                                                 (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
+                                                 (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
+    except ldap.LDAPError as e:
+        topology.standalone.log.error('Failed to configure password policy: ' + e.message['desc'])
+        assert False
+
+    #
+    # Add an aci to allow everyone all access (just makes things easier)
+    #
+    topology.standalone.log.info("Add aci to allow password admin to add/update entries...")
+
+    ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
+    ACI_TARGETATTR = "(targetattr = *)"
+    ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
+    ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+    mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
+    try:
+        topology.standalone.modify_s(SUFFIX, mod)
+    except ldap.LDAPError as e:
+        topology.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc'])
+        assert False
+
+    #
+    # Bind as the Password Admin
+    #
+    topology.standalone.log.info("Bind as the Password Administrator (before activating)...")
+    try:
+        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+    except ldap.LDAPError as e:
+        topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+        assert False
+
+    #
+    # Setup our test entry, and test password policy is working
+    #
+    entry = Entry(ENTRY_DN)
+    entry.setValues('objectclass', 'top', 'person')
+    entry.setValues('sn', ENTRY_NAME)
+    entry.setValues('cn', ENTRY_NAME)
+
+    #
+    # Start by attempting to add an entry with an invalid password
+    #
+    topology.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...")
+    for passwd in INVALID_PWDS:
+        failed_as_expected = False
+        entry.setValues('userpassword', passwd)
+        topology.standalone.log.info("Create a regular user entry %s with password (%s)..."
% (ENTRY_DN, passwd)) + try: + topology.standalone.add_s(entry) + except ldap.LDAPError as e: + # We failed as expected + failed_as_expected = True + topology.standalone.log.info('Add failed as expected: password (%s) result (%s)' + % (passwd, e.message['desc'])) + + if not failed_as_expected: + topology.standalone.log.error("We were incorrectly able to add an entry " + + "with an invalid password (%s)" % (passwd)) + assert False + + # + # Now activate a password administator, bind as root dn to do the config + # update, then rebind as the password admin + # + topology.standalone.log.info("Activate the Password Administator...") + + # Bind as Root DN + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) + assert False + + # Update config + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc']) + assert False + + # Bind as Password Admin + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) + assert False + + # + # Start adding entries with invalid passwords, delete the entry after each pass. + # + for passwd in INVALID_PWDS: + entry.setValues('userpassword', passwd) + topology.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd)) + try: + topology.standalone.add_s(entry) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to add entry with password (%s) result (%s)' + % (passwd, e.message['desc'])) + assert False + + topology.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN) + + # Delete entry for the next pass + try: + topology.standalone.delete_s(ENTRY_DN) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to delete entry: %s' % (e.message['desc'])) + assert False + + # + # Add the entry for the next round of testing (modify password) + # + entry.setValues('userpassword', ADMIN_PWD) + try: + topology.standalone.add_s(entry) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to add entry with valid password (%s) result (%s)' + % (passwd, e.message['desc'])) + assert False + + # + # Deactivate the password admin and make sure invalid password updates fail + # + topology.standalone.log.info("Deactivate Password Administator and try invalid password updates...") + + # Bind as root DN + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) + assert False + + # Update config + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)]) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to remove password admin from config: ' + e.message['desc']) + assert False + + # Bind as Password Admin + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) + assert False + + # + # Make invalid password updates that should fail + # + for passwd in INVALID_PWDS: + failed_as_expected = False + entry.setValues('userpassword', passwd) + try: + topology.standalone.modify_s(ENTRY_DN, 
[(ldap.MOD_REPLACE, 'userpassword', passwd)]) + except ldap.LDAPError as e: + # We failed as expected + failed_as_expected = True + topology.standalone.log.info('Password update failed as expected: password (%s) result (%s)' + % (passwd, e.message['desc'])) + + if not failed_as_expected: + topology.standalone.log.error("We were incorrectly able to add an invalid password (%s)" + % (passwd)) + assert False + + # + # Now activate a password administator + # + topology.standalone.log.info("Activate Password Administator and try updates again...") + + # Bind as root DN + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) + assert False + + # Update config + try: + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc']) + assert False + + # Bind as Password Admin + try: + topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + except ldap.LDAPError as e: + topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) + assert False + + # + # Make the same password updates, but this time they should succeed + # + for passwd in INVALID_PWDS: + entry.setValues('userpassword', passwd) + try: + topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) + except ldap.LDAPError as e: + topology.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)' + % (passwd, e.message['desc'])) + assert False + topology.standalone.log.info('Password update succeeded (%s)' % passwd) + + +def test_ticket47900_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47900(topo) + test_ticket47900_final(topo) + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47910_test.py b/dirsrvtests/tests/tickets/ticket47910_test.py new file mode 100644 index 0000000..afcfd88 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47910_test.py @@ -0,0 +1,205 @@ +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import re +import subprocess +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from datetime import datetime, timedelta + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + def fin(): + standalone.delete() + request.addfinalizer(fin) + + return TopologyStandalone(standalone) + + +@pytest.fixture(scope="module") +def log_dir(topology): + ''' + Do a search operation + and disable access log buffering + to generate the access log + ''' + + log.info("Diable access log buffering") + topology.standalone.setAccessLogBuffering(False) + + log.info("Do a ldapsearch operation") + topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") + + log.info("sleep for sometime so that access log file get generated") + time.sleep( 1 ) + + return topology.standalone.accesslog + + +def format_time(local_datetime): + formatted_time = (local_datetime.strftime("[%d/%b/%Y:%H:%M:%S]")) + return formatted_time + + +def execute_logconv(start_time_stamp, end_time_stamp, access_log): + ''' + This function will take start time and end time + as input parameter and + assign these values to -S and -E options of logconv + and, it will execute logconv and return result value + ''' + + log.info("Executing logconv.pl with -S current time and -E end time") + cmd = ['logconv.pl', '-S', start_time_stamp, '-E', end_time_stamp, access_log] + log.info(" ".join(cmd)) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + log.info("standard output" + stdout) + log.info("standard errors" + stderr) + return proc.returncode + + +def test_ticket47910_logconv_start_end_positive(topology, log_dir): + ''' + Execute logconv.pl with -S and -E(endtime) with random time stamp + This is execute successfully + ''' + # + # Execute logconv.pl -S -E with random timestamp + # + log.info('Running test_ticket47910 - Execute logconv.pl -S -E with random values') + + log.info("taking current time with offset of 2 mins and formatting it to feed -S") + start_time_stamp = (datetime.now() - timedelta(minutes=2)) + formatted_start_time_stamp = format_time(start_time_stamp) + + log.info("taking current time with offset of 2 mins and formatting it to feed -E") + end_time_stamp = (datetime.now() + timedelta(minutes=2)) + formatted_end_time_stamp = format_time(end_time_stamp) + + log.info("Executing logconv.pl with -S and -E") + result 
= execute_logconv(formatted_start_time_stamp, formatted_end_time_stamp, log_dir) + assert result == 0 + + +def test_ticket47910_logconv_start_end_negative(topology, log_dir): + ''' + Execute logconv.pl with -S and -E(endtime) with random time stamp + This is a negative test case, where endtime will be lesser than the + starttime + This should give error message + ''' + + # + # Execute logconv.pl -S and -E with random timestamp + # + log.info('Running test_ticket47910 - Execute logconv.pl -S -E with starttime>endtime') + + log.info("taking current time with offset of 2 mins and formatting it to feed -S") + start_time_stamp = (datetime.now() + timedelta(minutes=2)) + formatted_start_time_stamp = format_time(start_time_stamp) + + log.info("taking current time with offset of 2 mins and formatting it to feed -E") + end_time_stamp = (datetime.now() - timedelta(minutes=2)) + formatted_end_time_stamp = format_time(end_time_stamp) + + log.info("Executing logconv.pl with -S and -E") + result = execute_logconv(formatted_start_time_stamp, formatted_end_time_stamp, log_dir) + assert result == 1 + + +def test_ticket47910_logconv_start_end_invalid(topology, log_dir): + ''' + Execute logconv.pl with -S and -E(endtime) with invalid time stamp + This is a negative test case, where it should give error message + ''' + # + # Execute logconv.pl -S and -E with invalid timestamp + # + log.info('Running test_ticket47910 - Execute logconv.pl -S -E with invalid timestamp') + log.info("Set start time and end time to invalid values") + start_time_stamp = "invalid" + end_time_stamp = "invalid" + + log.info("Executing logconv.pl with -S and -E") + result = execute_logconv(start_time_stamp, end_time_stamp, log_dir) + assert result == 1 + + +def test_ticket47910_logconv_noaccesslogs(topology, log_dir): + + ''' + Execute logconv.pl -S(starttime) without specify + access logs location + ''' + + # + # Execute logconv.pl -S with random timestamp and no access log location + # + log.info('Running test_ticket47910 - Execute logconv.pl without access logs') + + log.info("taking current time with offset of 2 mins and formatting it to feed -S") + time_stamp = (datetime.now() - timedelta(minutes=2)) + formatted_time_stamp = format_time(time_stamp) + log.info("Executing logconv.pl with -S current time") + cmd = ['logconv.pl', '-S', formatted_time_stamp] + log.info(" ".join(cmd)) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + log.info("standard output" + stdout) + log.info("standard errors" + stderr) + + assert proc.returncode == 1 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + pytest.main("-s ticket47910_test.py") diff --git a/dirsrvtests/tests/tickets/ticket47920_test.py b/dirsrvtests/tests/tickets/ticket47920_test.py new file mode 100644 index 0000000..d4f6a53 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47920_test.py @@ -0,0 +1,194 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from ldap.controls.readentry import PreReadControl,PostReadControl + + +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + +INDIRECT_ACTIVE_GROUP_CN = "indirect active group" +INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) + +INITIAL_DESC = "inital description" +FINAL_DESC = "final description" + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
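+    # Note: open() also binds to the instance as the root DN, so the add_s /
+    # modify_s calls made by the tests below run with full administrative rights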
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("#######") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("#######") + topology.standalone.log.info("###############################################") + + +def _add_user(topology, type='active'): + if type == 'active': + topology.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN, + 'description': INITIAL_DESC}))) + elif type == 'stage': + topology.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + + +def test_ticket47920_init(topology): + topology.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + + # add users + _add_user(topology, 'active') + + +def test_ticket47920_mod_readentry_ctrl(topology): + _header(topology, 'MOD: with a readentry control') + + topology.standalone.log.info("Check the initial value of the entry") + ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ent.hasAttr('description') + assert ent.getValue('description') == INITIAL_DESC + + pr = PostReadControl(criticality=True, attrList=['cn', 'description']) + _, _, _, resp_ctrls = topology.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])], serverctrls=[pr]) + + assert resp_ctrls[0].dn == ACTIVE_USER_DN + assert 'description' in resp_ctrls[0].entry + assert 'cn' in resp_ctrls[0].entry + print(resp_ctrls[0].entry['description']) + + ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ent.hasAttr('description') + assert ent.getValue('description') == FINAL_DESC + + +def test_ticket47920_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47920_init(topo) + test_ticket47920_mod_readentry_ctrl(topo) + test_ticket47920_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47921_test.py b/dirsrvtests/tests/tickets/ticket47921_test.py new file mode 100644 index 0000000..4f3d54e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47921_test.py @@ -0,0 +1,163 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47921(topology): + ''' + Test that indirect cos reflects the current value of the indirect entry + ''' + + INDIRECT_COS_DN = 'cn=cos definition,' + DEFAULT_SUFFIX + MANAGER_DN = 'uid=my manager,ou=people,' + DEFAULT_SUFFIX + USER_DN = 'uid=user,ou=people,' + DEFAULT_SUFFIX + + # Add COS definition + try: + topology.standalone.add_s(Entry((INDIRECT_COS_DN, + {'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(), + 'cosIndirectSpecifier': 'manager', + 'cosAttribute': 'roomnumber' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add cos defintion, error: ' + e.message['desc']) + assert False + + # Add manager entry + try: + topology.standalone.add_s(Entry((MANAGER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'my manager', + 'roomnumber': '1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add manager entry, error: ' + e.message['desc']) + assert False + + # Add user entry + try: + topology.standalone.add_s(Entry((USER_DN, + {'objectclass': 'top person organizationalPerson inetorgperson'.split(), + 'sn': 'last', + 'cn': 'full', + 'givenname': 'mark', + 'uid': 'user', + 'manager': MANAGER_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add manager entry, error: ' + e.message['desc']) + assert False + + # Test COS is working + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + "uid=user", + ['roomnumber']) + if entry: + if entry[0].getValue('roomnumber') != '1': + log.fatal('COS is not working.') + assert False + else: + log.fatal('Failed to find user entry') + assert False + except ldap.LDAPError as e: + log.error('Failed to search for user entry: ' + e.message['desc']) + assert False + + # Modify manager entry + try: + topology.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', '2')]) + except ldap.LDAPError as e: + log.error('Failed to modify manager entry: ' + e.message['desc']) + assert False + + # Confirm COS is returning the new value + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + "uid=user", + ['roomnumber']) + if entry: + if entry[0].getValue('roomnumber') != '2': + log.fatal('COS is not 
working after manager update.') + assert False + else: + log.fatal('Failed to find user entry') + assert False + except ldap.LDAPError as e: + log.error('Failed to search for user entry: ' + e.message['desc']) + assert False + + log.info('Test complete') + + +def test_ticket47921_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47921(topo) + test_ticket47921_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py new file mode 100644 index 0000000..78e0b29 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47927_test.py @@ -0,0 +1,313 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +EXCLUDED_CONTAINER_CN = "excluded_container" +EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX) + +EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container" +EXCLUDED_BIS_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_BIS_CONTAINER_CN, SUFFIX) + +ENFORCED_CONTAINER_CN = "enforced_container" +ENFORCED_CONTAINER_DN = "cn=%s,%s" % (ENFORCED_CONTAINER_CN, SUFFIX) + +USER_1_CN = "test_1" +USER_1_DN = "cn=%s,%s" % (USER_1_CN, ENFORCED_CONTAINER_DN) +USER_2_CN = "test_2" +USER_2_DN = "cn=%s,%s" % (USER_2_CN, ENFORCED_CONTAINER_DN) +USER_3_CN = "test_3" +USER_3_DN = "cn=%s,%s" % (USER_3_CN, EXCLUDED_CONTAINER_DN) +USER_4_CN = "test_4" +USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN) + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + + # Creating standalone instance ... 
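+    # args_instance is the shared settings dict provided by the wildcard lib389
+    # imports above; it is copied into args_standalone below so the values set
+    # here do not leak into other test modules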
+ standalone = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47927_init(topology): + topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + try: + topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'), + (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX), + ]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.message['desc']) + assert False + topology.standalone.restart(timeout=120) + + topology.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(), + 'cn': EXCLUDED_CONTAINER_CN}))) + topology.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(), + 'cn': EXCLUDED_BIS_CONTAINER_CN}))) + topology.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(), + 'cn': ENFORCED_CONTAINER_CN}))) + + # adding an entry on a stage with a different 'cn' + topology.standalone.add_s(Entry((USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': USER_1_CN, + 'cn': USER_1_CN}))) + # adding an entry on a stage with a different 'cn' + topology.standalone.add_s(Entry((USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': USER_2_CN, + 'cn': USER_2_CN}))) + topology.standalone.add_s(Entry((USER_3_DN, { + 'objectclass': "top person".split(), + 'sn': USER_3_CN, + 'cn': USER_3_CN}))) + topology.standalone.add_s(Entry((USER_4_DN, { + 'objectclass': "top person".split(), + 'sn': USER_4_CN, + 'cn': USER_4_CN}))) + + +def test_ticket47927_one(topology): + ''' + Check that uniqueness is enforce on all SUFFIX + ''' + UNIQUE_VALUE='1234' + try: + topology.standalone.modify_s(USER_1_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc'])) + assert False + + # we expect to fail because user1 is in the scope of the plugin + try: + topology.standalone.modify_s(USER_2_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc'])) + pass + + + # we expect to fail because user1 is in the scope of the plugin + try: + topology.standalone.modify_s(USER_3_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN)) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc'])) + pass + + +def 
test_ticket47927_two(topology):
+    '''
+    Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin
+    '''
+    try:
+        topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+                                     [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)])
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_two: Failed to configure the plugin to exclude %s: error %s' % (EXCLUDED_CONTAINER_DN, e.message['desc']))
+        assert False
+    topology.standalone.restart(timeout=120)
+
+
+def test_ticket47927_three(topology):
+    '''
+    Check that uniqueness is enforced on the full SUFFIX except EXCLUDED_CONTAINER_DN
+    First case: an entry with the same attribute value exists in the scope
+    of the plugin, and we set the value on an entry that is in an excluded scope
+    '''
+    UNIQUE_VALUE = '9876'
+    try:
+        topology.standalone.modify_s(USER_1_DN,
+                                     [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.message['desc'])
+        assert False
+
+    # we should not be allowed to set this value (because user1 is in the scope)
+    try:
+        topology.standalone.modify_s(USER_2_DN,
+                                     [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
+        assert False
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc']))
+
+    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
+    try:
+        topology.standalone.modify_s(USER_3_DN,
+                                     [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        log.info('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+        assert False
+
+
+def test_ticket47927_four(topology):
+    '''
+    Check that uniqueness is enforced on the full SUFFIX except EXCLUDED_CONTAINER_DN
+    Second case: an entry with the same attribute value exists in an excluded scope
+    of the plugin, and we set the value on an entry that is in the scope
+    '''
+    UNIQUE_VALUE = '1111'
+    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
+    try:
+        topology.standalone.modify_s(USER_3_DN,
+                                     [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        log.info('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN)
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+        assert False
+
+    # we should be allowed to set this value (because user3 is excluded from scope)
+    try:
+        topology.standalone.modify_s(USER_1_DN,
+                                     [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
+        assert False
+
+    # we should not be allowed to set this value (because user1 is in the scope)
+    try:
+        topology.standalone.modify_s(USER_2_DN,
+                                     [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN)
+        assert False
+    except ldap.LDAPError as e:
+        log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc']))
+        pass
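+
+
+def _log_uniqueness_exclusions(topology):
+    '''
+    Illustrative sketch only (not called by the tests above or below): read back
+    the Attribute Uniqueness plugin entry and log which subtrees are currently
+    excluded.  It assumes the same getEntry() helper that test_ticket47927_five
+    already uses on this plugin entry.
+    '''
+    plugin_dn = 'cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config'
+    ent = topology.standalone.getEntry(plugin_dn, ldap.SCOPE_BASE,
+                                       "(objectclass=*)",
+                                       ['uniqueness-exclude-subtrees'])
+    if ent.hasAttr('uniqueness-exclude-subtrees'):
+        log.info('uniqueness-exclude-subtrees: %s' %
+                 ent.getValues('uniqueness-exclude-subtrees'))
+    else:
+        log.info('No subtrees are currently excluded from uniqueness checking')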
+ + +def test_ticket47927_five(topology): + ''' + Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin + ''' + try: + topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_BIS_CONTAINER_DN, e.message['desc'])) + assert False + topology.standalone.restart(timeout=120) + topology.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE) + + +def test_ticket47927_six(topology): + ''' + Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN + and EXCLUDED_BIS_CONTAINER_DN + First case: it exists an entry (with the same attribute value) in the scope + of the plugin and we set the value in an entry that is in an excluded scope + ''' + UNIQUE_VALUE = '222' + try: + topology.standalone.modify_s(USER_1_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.message['desc']) + assert False + + # we should not be allowed to set this value (because user1 is in the scope) + try: + topology.standalone.modify_s(USER_2_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc'])) + + + # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful + try: + topology.standalone.modify_s(USER_3_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN)) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc'])) + assert False + # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful + try: + topology.standalone.modify_s(USER_4_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN)) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_4_DN, e.message['desc'])) + assert False + + +def test_ticket47927_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47927_init(topo) + test_ticket47927_one(topo) + test_ticket47927_two(topo) + test_ticket47927_three(topo) + test_ticket47927_four(topo) + test_ticket47927_five(topo) + test_ticket47927_six(topo) + test_ticket47927_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47931_test.py b/dirsrvtests/tests/tickets/ticket47931_test.py new file mode 100644 index 0000000..9aa54fc --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47931_test.py @@ -0,0 +1,207 @@ +import os +import sys +import time +import ldap +import logging +import pytest +import threading +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from 
lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None +SECOND_SUFFIX = "dc=deadlock" +SECOND_BACKEND = "deadlock" +RETROCL_PLUGIN_DN = ('cn=' + PLUGIN_RETRO_CHANGELOG + ',cn=plugins,cn=config') +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) +MEMBER_DN_COMP = "uid=member" +TIME_OUT = 5 + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +class modifySecondBackendThread(threading.Thread): + def __init__(self, inst, timeout): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.timeout = timeout + + def run(self): + conn = self.inst.openConnection() + conn.set_option(ldap.OPT_TIMEOUT, self.timeout) + log.info('Modify second suffix...') + for x in range(0, 5000): + try: + conn.modify_s(SECOND_SUFFIX, + [(ldap.MOD_REPLACE, + 'description', + 'new description')]) + except ldap.LDAPError as e: + log.fatal('Failed to modify second suffix - error: %s' % + (e.message['desc'])) + assert False + + conn.close() + log.info('Finished modifying second suffix') + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47931(topology): + """Test Retro Changelog and MemberOf deadlock fix. + Verification steps: + - Enable retro cl and memberOf. + - Create two backends: A & B. + - Configure retrocl scoping for backend A. + - Configure memberOf plugin for uniquemember + - Create group in backend A. + - In parallel, add members to the group on A, and make modifications + to entries in backend B. + - Make sure the server does not hang during the updates to both + backends. + + """ + + # Enable dynamic plugins to make plugin configuration easier + try: + topology.standalone.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + 'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! 
' + e.message['desc'])
+        assert False
+
+    # Enable the plugins
+    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+
+    # Create second backend
+    topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
+    topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)
+
+    # Create the root node of the second backend
+    try:
+        topology.standalone.add_s(Entry((SECOND_SUFFIX,
+                                         {'objectclass': 'top domain'.split(),
+                                          'dc': 'deadlock'})))
+    except ldap.LDAPError as e:
+        log.fatal('Failed to create suffix entry: error ' + e.message['desc'])
+        assert False
+
+    # Configure retrocl scope
+    try:
+        topology.standalone.modify_s(RETROCL_PLUGIN_DN,
+                                     [(ldap.MOD_REPLACE,
+                                       'nsslapd-include-suffix',
+                                       DEFAULT_SUFFIX)])
+    except ldap.LDAPError as e:
+        log.error('Failed to configure retrocl plugin: ' + e.message['desc'])
+        assert False
+
+    # Configure memberOf group attribute
+    try:
+        topology.standalone.modify_s(MEMBEROF_PLUGIN_DN,
+                                     [(ldap.MOD_REPLACE,
+                                       'memberofgroupattr',
+                                       'uniquemember')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc'])
+        assert False
+
+    # Create group
+    try:
+        topology.standalone.add_s(Entry((GROUP_DN,
+                                         {'objectclass': 'top extensibleObject'.split(),
+                                          'cn': 'group'})))
+    except ldap.LDAPError as e:
+        log.fatal('Failed to add group: error ' + e.message['desc'])
+        assert False
+
+    # Create 1500 entries (future members of the group)
+    for idx in range(1, 1500):
+        try:
+            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+            topology.standalone.add_s(Entry((USER_DN,
+                                             {'objectclass': 'top extensibleObject'.split(),
+                                              'uid': 'member%d' % idx})))
+        except ldap.LDAPError as e:
+            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
+            assert False
+
+    # Modify second backend (separate thread)
+    mod_backend_thrd = modifySecondBackendThread(topology.standalone, TIME_OUT)
+    mod_backend_thrd.start()
+
+    # Add members to the group - set timeout
+    log.info('Adding members to the group...')
+    topology.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
+    for idx in range(1, 1500):
+        try:
+            MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+            topology.standalone.modify_s(GROUP_DN,
+                                         [(ldap.MOD_ADD,
+                                           'uniquemember',
+                                           MEMBER_VAL)])
+        except ldap.TIMEOUT:
+            log.fatal('Deadlock! Bug verification failed.')
+            assert False
+        except ldap.LDAPError as e:
+            log.fatal('Failed to update group (not a deadlock) member (%s) - error: %s' %
+                      (MEMBER_VAL, e.message['desc']))
+            assert False
+    log.info('Finished adding members to the group.')
+
+    # Wait for the thread to finish
+    mod_backend_thrd.join()
+
+    # No timeout, test passed!
+    log.info('Test complete\n')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
diff --git a/dirsrvtests/tests/tickets/ticket47937_test.py b/dirsrvtests/tests/tickets/ticket47937_test.py
new file mode 100644
index 0000000..6c09cf8
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket47937_test.py
@@ -0,0 +1,188 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2015 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47937(topology): + """ + Test that DNA plugin only accepts valid attributes for "dnaType" + """ + + log.info("Creating \"ou=people\"...") + try: + topology.standalone.add_s(Entry(('ou=people,' + SUFFIX, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'people' + }))) + + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add ou=people org unit: error ' + e.message['desc']) + assert False + + log.info("Creating \"ou=ranges\"...") + try: + topology.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'ranges' + }))) + + except ldap.LDAPError as e: + log.error('Failed to add ou=ranges org unit: error ' + e.message['desc']) + assert False + + log.info("Creating \"cn=entry\"...") + try: + topology.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, { + 'objectclass': 'top groupofuniquenames'.split(), + 'cn': 'entry' + }))) + + except ldap.LDAPError as e: + log.error('Failed to add test entry: error ' + e.message['desc']) + assert False + + log.info("Creating DNA shared config entry...") + try: + topology.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, { + 'objectclass': 'top dnaSharedConfig'.split(), + 'dnaHostname': 'localhost.localdomain', + 'dnaPortNum': '389', + 'dnaSecurePortNum': '636', + 'dnaRemainingValues': '9501' + }))) + + except ldap.LDAPError as e: + log.error('Failed to add shared config entry: error ' + e.message['desc']) + assert False + + log.info("Add dna plugin config entry...") + try: + topology.standalone.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { + 'objectclass': 'top dnaPluginConfig'.split(), + 'dnaType': 'description', + 'dnaMaxValue': '10000', + 'dnaMagicRegen': '0', + 'dnaFilter': '(objectclass=top)', + 'dnaScope': 'ou=people,%s' % SUFFIX, + 'dnaNextValue': '500', + 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX + }))) + + except ldap.LDAPError as e: + log.error('Failed to add DNA config entry: 
error ' + e.message['desc'])
+        assert False
+
+    log.info("Enable the DNA plugin...")
+    try:
+        topology.standalone.plugins.enable(name=PLUGIN_DNA)
+    except ldap.LDAPError as e:
+        log.error("Failed to enable DNA Plugin: error " + e.message['desc'])
+        assert False
+
+    log.info("Restarting the server...")
+    topology.standalone.stop(timeout=120)
+    time.sleep(1)
+    topology.standalone.start(timeout=120)
+    time.sleep(3)
+
+    log.info("Apply an invalid attribute to the DNA config (dnaType: foo)...")
+
+    try:
+        topology.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config',
+                                     [(ldap.MOD_REPLACE, 'dnaType', 'foo')])
+    except ldap.LDAPError as e:
+        log.info('Operation failed as expected (error: %s)' % e.message['desc'])
+    else:
+        log.error('Operation incorrectly succeeded! Test Failed!')
+        assert False
+
+
+def test_ticket47937_final(topology):
+    topology.standalone.delete()
+    log.info('Testcase PASSED')
+
+
+def run_isolated():
+    '''
+    run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+    To run isolated without py.test, you need to
+        - edit this file and comment '@pytest.fixture' line before 'topology' function.
+        - set the installation prefix
+        - run this program
+    '''
+    global installation_prefix
+    installation_prefix = None
+
+    topo = topology(True)
+    test_ticket47937(topo)
+    test_ticket47937_final(topo)
+
+
+if __name__ == '__main__':
+    run_isolated()
diff --git a/dirsrvtests/tests/tickets/ticket47950_test.py b/dirsrvtests/tests/tickets/ticket47950_test.py
new file mode 100644
index 0000000..7226637
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket47950_test.py
@@ -0,0 +1,223 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2015 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
+USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+    def __init__(self, standalone):
+        standalone.open()
+        self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    '''
+    This fixture is used to create a standalone topology for the 'module'.
+    '''
+    global installation_prefix
+
+    if installation_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+    standalone = DirSrv(verbose=False)
+
+    # Args for the standalone instance
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+
+    # Get the status of the instance and restart it if it exists
+    instance_standalone = standalone.exists()
+
+    # Remove the instance
+    if instance_standalone:
+        standalone.delete()
+
+    # Create the instance
+    standalone.create()
+
+    # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47950(topology): + """ + Testing nsslapd-plugin-binddn-tracking does not cause issues around + access control and reconfiguring replication/repl agmt. + """ + + log.info('Testing Ticket 47950 - Testing nsslapd-plugin-binddn-tracking') + + # + # Turn on bind dn tracking + # + try: + topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')]) + log.info('nsslapd-plugin-binddn-tracking enabled.') + except ldap.LDAPError as e: + log.error('Failed to enable bind dn tracking: ' + e.message['desc']) + assert False + + # + # Add two users + # + try: + topology.standalone.add_s(Entry((USER1_DN, { + 'objectclass': "top person inetuser".split(), + 'userpassword': "password", + 'sn': "1", + 'cn': "user 1"}))) + log.info('Added test user %s' % USER1_DN) + except ldap.LDAPError as e: + log.error('Failed to add %s: %s' % (USER1_DN, e.message['desc'])) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': "2", + 'cn': "user 2"}))) + log.info('Added test user %s' % USER2_DN) + except ldap.LDAPError as e: + log.error('Failed to add user1: ' + e.message['desc']) + assert False + + # + # Add an aci + # + try: + acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \ + ';allow (all) (userdn = "ldap:///%s");)' % USER1_DN + + topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)]) + log.info('Added aci') + except ldap.LDAPError as e: + log.error('Failed to add aci: ' + e.message['desc']) + assert False + + # + # Make modification as user + # + try: + topology.standalone.simple_bind_s(USER1_DN, "password") + log.info('Bind as user %s successful' % USER1_DN) + except ldap.LDAPError as e: + log.error('Failed to bind as user1: ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s(USER2_DN, [(ldap.MOD_REPLACE, 'cn', 'new value')]) + log.info('%s successfully modified user %s' % (USER1_DN, USER2_DN)) + except ldap.LDAPError as e: + log.error('Failed to update user2: ' + e.message['desc']) + assert False + + # + # Setup replica and create a repl agmt + # + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + log.info('Bind as %s successful' % DN_DM) + except ldap.LDAPError as e: + log.error('Failed to bind as rootDN: ' + e.message['desc']) + assert False + + try: + topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, + replicaId=REPLICAID_MASTER_1) + log.info('Successfully enabled replication.') + except ValueError: + log.error('Failed to enable replication') + assert False + + properties = {RA_NAME: r'test plugin internal bind dn', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + + try: + repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, host="127.0.0.1", + port="7777", properties=properties) + log.info('Successfully created replication agreement') + except InvalidArgumentError as e: + log.error('Failed to create replication agreement: ' + e.message['desc']) + assert False + + # + # modify replica + # + try: + properties = {REPLICA_ID: "7"} + 
topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, properties) + log.info('Successfully modified replica') + except ldap.LDAPError as e: + log.error('Failed to update replica config: ' + e.message['desc']) + assert False + + # + # modify repl agmt + # + try: + properties = {RA_CONSUMER_PORT: "8888"} + topology.standalone.agreement.setProperties(None, repl_agreement, None, properties) + log.info('Successfully modified replication agreement') + except ValueError: + log.error('Failed to update replica agreement: ' + repl_agreement) + assert False + + +def test_ticket47950_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47950(topo) + test_ticket47950_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47953_test.py b/dirsrvtests/tests/tickets/ticket47953_test.py new file mode 100644 index 0000000..f64d899 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47953_test.py @@ -0,0 +1,128 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47953(topology): + """ + Test that we can delete an aci that has an invalid syntax. + Start by importing an ldif with a "bad" aci, then simply try + to remove that value without error.
+ """ + + log.info('Testing Ticket 47953 - Test we can delete aci that has invalid syntax') + + # + # Import an invalid ldif + # + ldif_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket47953/ticket47953.ldif" + importTask = Tasks(topology.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + except ValueError: + assert False + + # + # Delete the invalid aci + # + acival = '(targetattr ="fffff")(version 3.0;acl "Directory Administrators Group"' + \ + ';allow (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com");)' + + log.info('Attempting to remove invalid aci...') + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)]) + log.info('Removed invalid aci.') + except ldap.LDAPError as e: + log.error('Failed to remove invalid aci: ' + e.message['desc']) + assert False + + +def test_ticket47953_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47953(topo) + test_ticket47953_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py new file mode 100644 index 0000000..deed905 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47963_test.py @@ -0,0 +1,199 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47963(topology): + ''' + Test that the memberOf plugin works correctly after setting: + + memberofskipnested: on + + ''' + PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' + USER_DN = 'uid=test_user,' + DEFAULT_SUFFIX + GROUP_DN1 = 'cn=group1,' + DEFAULT_SUFFIX + GROUP_DN2 = 'cn=group2,' + DEFAULT_SUFFIX + GROUP_DN3 = 'cn=group3,' + DEFAULT_SUFFIX + + # + # Enable the plugin and configure the skip nested attribute, then restart the server + # + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to modify config entry: error ' + e.message['desc']) + assert False + + topology.standalone.restart(timeout=10) + + # + # Add our groups, users, memberships, etc + # + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'test_user' + }))) + except ldap.LDAPError as e: + log.error('Failed to add test user: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((GROUP_DN1, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group1', + 'member': USER_DN + }))) + except ldap.LDAPError as e: + log.error('Failed to add group1: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((GROUP_DN2, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group2', + 'member': USER_DN + }))) + except ldap.LDAPError as e: + log.error('Failed to add group2: error ' + e.message['desc']) + assert False + + # Add group with no member (yet) + try: + topology.standalone.add_s(Entry((GROUP_DN3, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group' + }))) + except ldap.LDAPError as e: + log.error('Failed to add group3: error ' + e.message['desc']) + assert False + time.sleep(1) + + # + # Test we have the correct memberOf values in the user entry + # + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))') + entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User is missing expected memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.message['desc']) + assert False + + # Add the user to the group + try: + topology.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)]) + except ldap.LDAPError as e: + log.error('Failed to add member to group: error ' + e.message['desc']) + assert False + time.sleep(1) + + # Check that the test user is a "memberOf" of all three groups + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + + ')(memberOf=' + GROUP_DN3 + '))') + entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: +
log.fatal('User is missing expected memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.message['desc']) + assert False + + # + # Delete group2, and check memberOf values in the user entry + # + try: + topology.standalone.delete_s(GROUP_DN2) + except ldap.LDAPError as e: + log.error('Failed to delete test group2: ' + e.message['desc']) + assert False + time.sleep(1) + + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))') + entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User incorrect memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.message['desc']) + assert False + + log.info('Test complete') + + +def test_ticket47963_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47963(topo) + test_ticket47963_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47966_test.py b/dirsrvtests/tests/tickets/ticket47966_test.py new file mode 100644 index 0000000..b311f47 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47966_test.py @@ -0,0 +1,227 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +m1_m2_agmt = "" + +class TopologyReplication(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + + # Creating master 2... 
+ master2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + global m1_m2_agmt + m1_m2_agmt = master1.agreement.create(suffix=DEFAULT_SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + return TopologyReplication(master1, master2) + + +def test_ticket47966(topology): + ''' + Testing bulk import when the backend with VLV was recreated. + If the test passes without the server crash, 47966 is verified. + ''' + log.info('Testing Ticket 47966 - [VLV] slapd crashes during Dogtag clone reinstallation') + M1 = topology.master1 + M2 = topology.master2 + + log.info('0. Create a VLV index on Master 2.') + # get the backend entry + be = M2.replica.conn.backend.list(suffix=DEFAULT_SUFFIX) + if not be: + log.fatal("ticket47966: enable to retrieve the backend for %s" % DEFAULT_SUFFIX) + raise ValueError("no backend for suffix %s" % DEFAULT_SUFFIX) + bent = be[0] + beName = bent.getValue('cn') + beDn = "cn=%s,cn=ldbm database,cn=plugins,cn=config" % beName + + # generate vlvSearch entry + vlvSrchDn = "cn=vlvSrch,%s" % beDn + log.info('0-1. 
vlvSearch dn: %s' % vlvSrchDn) + vlvSrchEntry = Entry(vlvSrchDn) + vlvSrchEntry.setValues('objectclass', 'top', 'vlvSearch') + vlvSrchEntry.setValues('cn', 'vlvSrch') + vlvSrchEntry.setValues('vlvBase', DEFAULT_SUFFIX) + vlvSrchEntry.setValues('vlvFilter', '(|(objectclass=*)(objectclass=ldapsubentry))') + vlvSrchEntry.setValues('vlvScope', '2') + M2.add_s(vlvSrchEntry) + + # generate vlvIndex entry + vlvIndexDn = "cn=vlvIdx,%s" % vlvSrchDn + log.info('0-2. vlvIndex dn: %s' % vlvIndexDn) + vlvIndexEntry = Entry(vlvIndexDn) + vlvIndexEntry.setValues('objectclass', 'top', 'vlvIndex') + vlvIndexEntry.setValues('cn', 'vlvIdx') + vlvIndexEntry.setValues('vlvSort', 'cn ou sn') + M2.add_s(vlvIndexEntry) + + log.info('1. Initialize Master 2 from Master 1.') + M1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + M1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if M1.testReplication(DEFAULT_SUFFIX, M2): + log.info('1-1. Replication is working.') + else: + log.fatal('1-1. Replication is not working.') + assert False + + log.info('2. Delete the backend instance on Master 2.') + M2.delete_s(vlvIndexDn) + M2.delete_s(vlvSrchDn) + # delete the agreement, replica, and mapping tree, too. + M2.replica.disableReplication(DEFAULT_SUFFIX) + mappingTree = 'cn="%s",cn=mapping tree,cn=config' % DEFAULT_SUFFIX + M2.mappingtree.delete(DEFAULT_SUFFIX, beName, mappingTree) + M2.backend.delete(DEFAULT_SUFFIX, beDn, beName) + + log.info('3. Recreate the backend and the VLV index on Master 2.') + M2.mappingtree.create(DEFAULT_SUFFIX, beName) + M2.backend.create(DEFAULT_SUFFIX, {BACKEND_NAME: beName}) + log.info('3-1. Recreating %s and %s on Master 2.' % (vlvSrchDn, vlvIndexDn)) + M2.add_s(vlvSrchEntry) + M2.add_s(vlvIndexEntry) + M2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + # agreement m2_m1_agmt is not needed... :p + + log.info('4. Initialize Master 2 from Master 1 again.') + M1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + M1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if M1.testReplication(DEFAULT_SUFFIX, M2): + log.info('4-1. Replication is working.') + else: + log.fatal('4-1. Replication is not working.') + assert False + + log.info('5. Check Master 2 is up.') + entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('5-1. %s entries are returned from M2.' % len(entries)) + + log.info('Test complete') + + +def test_ticket47966_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47966(topo) + test_ticket47966_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py new file mode 100644 index 0000000..a748939 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47970_test.py @@ -0,0 +1,158 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import ldap.sasl +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX +USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47970(topology): + """ + Testing that a failed SASL bind does not trigger account lockout - + which would attempt to update the passwordRetryCount on the root dse entry + """ + + log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout') + + # + # Enable account lockout + # + try: + topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')]) + log.info('account lockout enabled.') + except ldap.LDAPError as e: + log.error('Failed to enable account lockout: ' + e.message['desc']) + assert False + + try: + topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')]) + log.info('passwordMaxFailure set.') + except ldap.LDAPError as e: + log.error('Failed to to set passwordMaxFailure: ' + e.message['desc']) + assert False + + # + # Perform SASL bind that should fail + # + failed_as_expected = False + try: + user_name = "mark" + pw = "secret" + auth_tokens = ldap.sasl.digest_md5(user_name, pw) + topology.standalone.sasl_interactive_bind_s("", auth_tokens) + except ldap.INVALID_CREDENTIALS as e: + log.info("SASL Bind failed as expected") + failed_as_expected = True + + if not failed_as_expected: + log.error("SASL bind unexpectedly succeeded!") + assert False + + # + # Check that passwordRetryCount was not set on the root dse entry + # + try: + entry = topology.standalone.search_s("", ldap.SCOPE_BASE, + "passwordRetryCount=*", + ['passwordRetryCount']) + except ldap.LDAPError as e: + log.error('Failed to search Root DSE entry: ' + e.message['desc']) + assert False + + if entry: + log.error('Root DSE was incorrectly updated') + assert False + + # We passed + log.info('Root DSE was correctly not updated') + + +def test_ticket47970_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases 
independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47970(topo) + test_ticket47970_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47973_test.py b/dirsrvtests/tests/tickets/ticket47973_test.py new file mode 100644 index 0000000..12bb789 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47973_test.py @@ -0,0 +1,185 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import ldap.sasl +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +USER_DN = 'uid=user1,%s' % (DEFAULT_SUFFIX) +SCHEMA_RELOAD_COUNT = 10 + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def task_complete(conn, task_dn): + finished = False + + try: + task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*') + if not task_entry: + log.fatal('wait_for_task: Search failed to find task: ' + task_dn) + assert False + if task_entry[0].hasAttr('nstaskexitcode'): + # task is done + finished = True + except ldap.LDAPError as e: + log.fatal('wait_for_task: Search failed: ' + e.message['desc']) + assert False + + return finished + + +def test_ticket47973(topology): + """ + During the schema reload task there is a small window where the new schema is not loaded + into the asi hashtables - this results in searches not returning entries. 
+ """ + + log.info('Testing Ticket 47973 - Test the searches still work as expected during schema reload tasks') + + # + # Add a user + # + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.message['desc']) + assert False + + # + # Run a series of schema_reload tasks while searching for our user. Since + # this is a race condition, run it several times. + # + task_count = 0 + while task_count < SCHEMA_RELOAD_COUNT: + # + # Add a schema reload task + # + + TASK_DN = 'cn=task-' + str(task_count) + ',cn=schema reload task, cn=tasks, cn=config' + try: + topology.standalone.add_s(Entry((TASK_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'task-' + str(task_count) + }))) + except ldap.LDAPError as e: + log.error('Failed to add task entry: error ' + e.message['desc']) + assert False + + # + # While we wait for the task to complete keep searching for our user + # + search_count = 0 + while search_count < 100: + # + # Now check the user is still being returned + # + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=user1)') + if not entries or not entries[0]: + log.fatal('User was not returned from search!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + assert False + + # + # Check if task is complete + # + if task_complete(topology.standalone, TASK_DN): + break + + search_count += 1 + + task_count += 1 + + +def test_ticket47973_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47973(topo) + test_ticket47973_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py new file mode 100644 index 0000000..34f0d3f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47980_test.py @@ -0,0 +1,662 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import ldap.sasl +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +BRANCH1 = 'ou=level1,' + DEFAULT_SUFFIX +BRANCH2 = 'ou=level2,ou=level1,' + DEFAULT_SUFFIX +BRANCH3 = 'ou=level3,ou=level2,ou=level1,' + DEFAULT_SUFFIX +BRANCH4 = 'ou=people,' + DEFAULT_SUFFIX +BRANCH5 = 'ou=lower,ou=people,' + DEFAULT_SUFFIX +BRANCH6 = 'ou=lower,ou=lower,ou=people,' + DEFAULT_SUFFIX +USER1_DN = 'uid=user1,%s' % (BRANCH1) +USER2_DN = 'uid=user2,%s' % (BRANCH2) +USER3_DN = 'uid=user3,%s' % (BRANCH3) +USER4_DN = 'uid=user4,%s' % (BRANCH4) +USER5_DN = 'uid=user5,%s' % (BRANCH5) +USER6_DN = 'uid=user6,%s' % (BRANCH6) + +BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com' + +BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com' + +BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com' + +BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH4_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' + +BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com' +BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' +BRANCH5_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' +BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com' + +BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 
'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket47980(topology): + """ + Multiple COS pointer definitions that use the same attribute are not correctly ordered. + The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead + to the wrong cos attribute value being applied to the entry. 
+ """ + + log.info('Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly') + + # Add our nested branches + try: + topology.standalone.add_s(Entry((BRANCH1, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level1: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((BRANCH2, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level2' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level2: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((BRANCH3, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'level3' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level3: error ' + e.message['desc']) + assert False + + # People branch, might already exist + try: + topology.standalone.add_s(Entry((BRANCH4, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level4' + }))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add level4: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((BRANCH5, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level5' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level5: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((BRANCH6, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'level6' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level6: error ' + e.message['desc']) + assert False + + # Add users to each branch + try: + topology.standalone.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user2: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER3_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user3' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user3: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER4_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user4' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user4: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER5_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user5' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user5: error ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER6_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user6' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user6: error ' + e.message['desc']) + assert False + + # Enable password policy + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + # + # Add subtree policy to branch 1 + # + # Add the container + try: + topology.standalone.add_s(Entry((BRANCH1_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + 
}))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level1: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + topology.standalone.add_s(Entry((BRANCH1_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level1: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + topology.standalone.add_s(Entry((BRANCH1_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH1_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level1: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + topology.standalone.add_s(Entry((BRANCH1_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'costemplatedn': BRANCH1_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level1: error ' + e.message['desc']) + assert False + + # + # Add subtree policy to branch 2 + # + # Add the container + try: + topology.standalone.add_s(Entry((BRANCH2_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level2: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + topology.standalone.add_s(Entry((BRANCH2_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level2: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + topology.standalone.add_s(Entry((BRANCH2_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH2_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level2: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + topology.standalone.add_s(Entry((BRANCH2_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'costemplatedn': BRANCH2_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level2: error ' + e.message['desc']) + assert False + + # + # Add subtree policy to branch 3 + # + # Add the container + try: + topology.standalone.add_s(Entry((BRANCH3_CONTAINER, { + 'objectclass': 'top 
nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level3: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + topology.standalone.add_s(Entry((BRANCH3_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level3: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + topology.standalone.add_s(Entry((BRANCH3_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH3_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level3: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + topology.standalone.add_s(Entry((BRANCH3_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'costemplatedn': BRANCH3_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level3: error ' + e.message['desc']) + assert False + + # + # Add subtree policy to branch 4 + # + # Add the container + try: + topology.standalone.add_s(Entry((BRANCH4_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level3: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + topology.standalone.add_s(Entry((BRANCH4_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch4: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + topology.standalone.add_s(Entry((BRANCH4_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH4_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level3: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + topology.standalone.add_s(Entry((BRANCH4_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH4_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for branch4: error ' + e.message['desc']) + assert False + + # + # Add subtree policy to branch 5 + # + # Add the container + try: + 
topology.standalone.add_s(Entry((BRANCH5_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for branch5: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + topology.standalone.add_s(Entry((BRANCH5_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch5: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + topology.standalone.add_s(Entry((BRANCH5_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH5_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for branch5: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + topology.standalone.add_s(Entry((BRANCH5_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH5_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level3: error ' + e.message['desc']) + assert False + + # + # Add subtree policy to branch 6 + # + # Add the container + try: + topology.standalone.add_s(Entry((BRANCH6_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for branch6: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + topology.standalone.add_s(Entry((BRANCH6_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch6: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + topology.standalone.add_s(Entry((BRANCH6_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH6_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for branch6: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + topology.standalone.add_s(Entry((BRANCH6_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH6_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def 
for branch6: error ' + e.message['desc']) + assert False + + time.sleep(2) + + # + # Now check that each user has its expected passwordPolicy subentry + # + try: + entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.message['desc'])) + assert False + + try: + entries = topology.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.message['desc'])) + assert False + + try: + entries = topology.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.message['desc'])) + assert False + + try: + entries = topology.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER4_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.message['desc'])) + assert False + + try: + entries = topology.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.message['desc'])) + assert False + + try: + entries = topology.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER6_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.message['desc'])) + assert False + + +def test_ticket47980_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47980(topo) + test_ticket47980_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py new file mode 100644 index 0000000..b25d7dd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47981_test.py @@ -0,0 +1,295 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import ldap.sasl +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +BRANCH = 'ou=people,' + DEFAULT_SUFFIX +USER_DN = 'uid=user1,%s' % (BRANCH) +BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' +BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +SECOND_SUFFIX = 'o=netscaperoot' +BE_NAME = 'netscaperoot' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def addSubtreePwPolicy(inst): + # + # Add subtree policy to the people branch + # + try: + inst.add_s(Entry((BRANCH_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for ou=people: error ' + e.message['desc']) + assert False + + # Add the password policy subentry + try: + inst.add_s(Entry((BRANCH_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + assert False + + # Add the COS template + try: + inst.add_s(Entry((BRANCH_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.message['desc']) + assert False + + # Add the COS definition + try: + inst.add_s(Entry((BRANCH_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.message['desc']) + assert False + time.sleep(0.5) + + +def delSubtreePwPolicy(inst): + try: + inst.delete_s(BRANCH_COS_DEF) + except ldap.LDAPError as e: + log.error('Failed to delete COS def: error ' + e.message['desc']) + assert False + + try: + inst.delete_s(BRANCH_COS_TMPL) + except ldap.LDAPError as e: + log.error('Failed to delete COS template: error ' + e.message['desc']) + assert False + + try: + inst.delete_s(BRANCH_PWP) + except ldap.LDAPError as e: + log.error('Failed to delete COS password policy: error ' + e.message['desc']) + assert False + + try: + inst.delete_s(BRANCH_CONTAINER) + except ldap.LDAPError as e: + log.error('Failed to delete COS container: error ' + e.message['desc']) + assert False + time.sleep(0.5) + + +def test_ticket47981(topology): + """ + If there are multiple suffixes, and the last suffix checked does not contain any COS entries, + while other suffixes do, then the vattr cache is not invalidated as it should be. Then any + cached entries will still contain the old COS attributes/values. 
+ """ + + log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users') + + # + # Create a second backend that does not have any COS entries + # + log.info('Adding second suffix that will not contain any COS entries...\n') + + topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME}) + topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME) + try: + topology.standalone.add_s(Entry((SECOND_SUFFIX, { + 'objectclass': 'top organization'.split(), + 'o': BE_NAME}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to create suffix entry: error ' + e.message['desc']) + assert False + + # + # Add People branch, it might already exist + # + log.info('Add our test entries to the default suffix, and proceed with the test...') + + try: + topology.standalone.add_s(Entry((BRANCH, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level4' + }))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add ou=people: error ' + e.message['desc']) + assert False + + # + # Add a user to the branch + # + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.message['desc']) + assert False + + # + # Enable password policy and add the subtree policy + # + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + addSubtreePwPolicy(topology.standalone) + + # + # Now check the user has its expected passwordPolicy subentry + # + try: + entries = topology.standalone.search_s(USER_DN, + ldap.SCOPE_BASE, + '(objectclass=top)', + ['pwdpolicysubentry', 'dn']) + if not entries[0].hasAttr('pwdpolicysubentry'): + log.fatal('User does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + assert False + + # + # Delete the password policy and make sure it is removed from the same user + # + delSubtreePwPolicy(topology.standalone) + try: + entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if entries[0].hasAttr('pwdpolicysubentry'): + log.fatal('User unexpectedly does have the pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + assert False + + # + # Add the subtree policvy back and see if the user now has it + # + addSubtreePwPolicy(topology.standalone) + try: + entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasAttr('pwdpolicysubentry'): + log.fatal('User does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + assert False + + +def test_ticket47981_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. 
+ - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket47981(topo) + test_ticket47981_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket47988_test.py b/dirsrvtests/tests/tickets/ticket47988_test.py new file mode 100644 index 0000000..db58e9d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47988_test.py @@ -0,0 +1,503 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import os +import sys +import time +import ldap +import logging +import pytest +import tarfile +import stat +import shutil +from random import randint +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# +# important part. We can deploy Master1 and Master2 on different versions +# +installation1_prefix = None +installation2_prefix = None + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47988' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc +class TopologyMaster1Master2(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to create a replicated topology for the 'module'. + The replicated topology is MASTER1 <-> Master2. 
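+    Both masters are (re)created from scratch, replication is enabled on each
+    side with its own replica id, agreements are created in both directions,
+    and master2 is initialized from master1 before the fixture returns.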
+ ''' + global installation1_prefix + global installation2_prefix + + #os.environ['USE_VALGRIND'] = '1' + + # allocate master1 on a given deployement + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Args for the master1 instance + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_master = args_instance.copy() + master1.allocate(args_master) + + # allocate master1 on a given deployement + master2 = DirSrv(verbose=False) + if installation2_prefix: + args_instance[SER_DEPLOYED_DIR] = installation2_prefix + + # Args for the consumer instance + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_master = args_instance.copy() + master2.allocate(args_master) + + # Get the status of the instance and restart it if it exists + instance_master1 = master1.exists() + instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Here we have two instances master and consumer + return TopologyMaster1Master2(master1, master2) + + +def _header(topology, label): + topology.master1.log.info("\n\n###############################################") + topology.master1.log.info("#######") + topology.master1.log.info("####### %s" % label) + topology.master1.log.info("#######") + topology.master1.log.info("###################################################") + + +def _install_schema(server, tarFile): + server.stop(timeout=10) + + tmpSchema = '/tmp/schema_47988' + if not os.path.isdir(tmpSchema): + os.mkdir(tmpSchema) + + for the_file in os.listdir(tmpSchema): + file_path = os.path.join(tmpSchema, the_file) + if os.path.isfile(file_path): + 
os.unlink(file_path) + + os.chdir(tmpSchema) + tar = tarfile.open(tarFile, 'r:gz') + for member in tar.getmembers(): + tar.extract(member.name) + + tar.close() + + st = os.stat(server.schemadir) + os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR) + for the_file in os.listdir(tmpSchema): + schemaFile = os.path.join(server.schemadir, the_file) + if os.path.isfile(schemaFile): + if the_file.startswith('99user.ldif'): + # only replace 99user.ldif, the other standard definition are kept + os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR) + server.log.info("replace %s" % schemaFile) + shutil.copy(the_file, schemaFile) + + else: + server.log.info("add %s" % schemaFile) + shutil.copy(the_file, schemaFile) + os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP) + os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP) + + +def test_ticket47988_init(topology): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + _header(topology, 'test_ticket47988_init') + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))] # Internal op + topology.master1.modify_s(DN_CONFIG, mod) + topology.master2.modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + # check that entry 0 is replicated before + loop = 0 + entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + while loop <= 10: + try: + ent = topology.master2.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert (loop <= 10) + + topology.master1.stop(timeout=10) + topology.master2.stop(timeout=10) + + #install the specific schema M1: ipa3.3, M2: ipa4.1 + schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz") + _install_schema(topology.master1, schema_file) + schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz") + _install_schema(topology.master2, schema_file) + + topology.master1.start(timeout=10) + topology.master2.start(timeout=10) + + +def _do_update_schema(server, range=3999): + ''' + Update the schema of the M2 (IPA4.1). 
to generate a nsSchemaCSN + ''' + postfix = str(randint(range, range + 1000)) + OID = '2.16.840.1.113730.3.8.12.%s' % postfix + NAME = 'thierry%s' % postfix + value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME) + mod = [(ldap.MOD_ADD, 'objectclasses', value)] + server.modify_s('cn=schema', mod) + + +def _do_update_entry(supplier=None, consumer=None, attempts=10): + ''' + This is doing an update on M2 (IPA4.1) and checks the update has been + propagated to M1 (IPA3.3) + ''' + assert(supplier) + assert(consumer) + entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + value = str(randint(100, 200)) + mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)] + supplier.modify_s(entryDN, mod) + + loop = 0 + while loop <= attempts: + ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) + read_val = ent.telephonenumber or "0" + if read_val == value: + break + # the expected value is not yet replicated. try again + time.sleep(5) + loop += 1 + supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value)) + assert (loop <= attempts) + + +def _pause_M2_to_M1(topology): + topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n") + ents = topology.master2.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master2.agreement.pause(ents[0].dn) + + +def _resume_M1_to_M2(topology): + topology.master1.log.info("\n\n######################### resume RA M1->M2 ######################\n") + ents = topology.master1.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master1.agreement.resume(ents[0].dn) + + +def _pause_M1_to_M2(topology): + topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n") + ents = topology.master1.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master1.agreement.pause(ents[0].dn) + + +def _resume_M2_to_M1(topology): + topology.master1.log.info("\n\n######################### resume RA M2->M1 ######################\n") + ents = topology.master2.agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology.master2.agreement.resume(ents[0].dn) + + +def test_ticket47988_1(topology): + ''' + Check that replication is working and pause replication M2->M1 + ''' + _header(topology, 'test_ticket47988_1') + + topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n") + _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) + _pause_M2_to_M1(topology) + + +def test_ticket47988_2(topology): + ''' + Update M1 schema and trigger update M1->M2 + So M1 should learn new/extended definitions that are in M2 schema + ''' + _header(topology, 'test_ticket47988_2') + + topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n") + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\nBefore updating the schema on M1\n") + topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + + # Here M1 should no, should check M2 schema and learn + _do_update_schema(topology.master1) + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\nAfter updating the schema on M1\n") 
+ topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + + # to avoid linger effect where a replication session is reused without checking the schema + _pause_M1_to_M2(topology) + _resume_M1_to_M2(topology) + + #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") + #time.sleep(60) + _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15) + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\nAfter a full replication session\n") + topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + + +def test_ticket47988_3(topology): + ''' + Resume replication M2->M1 and check replication is still working + ''' + _header(topology, 'test_ticket47988_3') + + _resume_M2_to_M1(topology) + _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5) + _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) + + +def test_ticket47988_4(topology): + ''' + Check schemaCSN is identical on both server + And save the nsschemaCSN to later check they do not change unexpectedly + ''' + _header(topology, 'test_ticket47988_4') + + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + assert (master1_schema_csn == master2_schema_csn) + + topology.master1.saved_schema_csn = master1_schema_csn + topology.master2.saved_schema_csn = master2_schema_csn + + +def test_ticket47988_5(topology): + ''' + Check schemaCSN do not change unexpectedly + ''' + _header(topology, 'test_ticket47988_5') + + _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5) + _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + assert (master1_schema_csn == master2_schema_csn) + + assert (topology.master1.saved_schema_csn == master1_schema_csn) + assert (topology.master2.saved_schema_csn == master2_schema_csn) + + +def test_ticket47988_6(topology): + ''' + Update M1 schema and trigger update M2->M1 + So M2 should learn new/extended definitions that are in M1 schema + ''' + + _header(topology, 'test_ticket47988_6') + + topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n") + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\nBefore updating the schema on M1\n") + topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + + # Here M1 should no, should check M2 schema 
and learn + _do_update_schema(topology.master1, range=5999) + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\nAfter updating the schema on M1\n") + topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + + # to avoid linger effect where a replication session is reused without checking the schema + _pause_M1_to_M2(topology) + _resume_M1_to_M2(topology) + + #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") + #time.sleep(60) + _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15) + master1_schema_csn = topology.master1.schema.get_schema_csn() + master2_schema_csn = topology.master2.schema.get_schema_csn() + topology.master1.log.debug("\nAfter a full replication session\n") + topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + + +def test_ticket47988_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + global installation2_prefix + installation1_prefix = None + installation2_prefix = None + + topo = topology(True) + test_ticket47988_init(topo) + test_ticket47988_1(topo) + test_ticket47988_2(topo) + test_ticket47988_3(topo) + test_ticket47988_4(topo) + test_ticket47988_5(topo) + test_ticket47988_6(topo) + test_ticket47988_final(topo) + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48005_test.py b/dirsrvtests/tests/tickets/ticket48005_test.py new file mode 100644 index 0000000..b2a93e1 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48005_test.py @@ -0,0 +1,415 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import re +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
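+    # The same fixture pattern is used by the other standalone tests in this
+    # changeset: allocate the instance from args_instance, delete any leftover
+    # instance registered under the same server id, then create and open a
+    # fresh one and clear the tmp directory.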
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48005_setup(topology): + ''' + allow dump core + generate a test ldif file using dbgen.pl + import the ldif + ''' + log.info("Ticket 48005 setup...") + if hasattr(topology.standalone, 'prefix'): + prefix = topology.standalone.prefix + else: + prefix = None + sysconfig_dirsrv = prefix + ENV_SYSCONFIG_DIR + "/dirsrv" + cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv + p = os.popen(cmdline, "r") + ulimitc = p.readline() + if ulimitc == "": + log.info('No ulimit -c in %s' % sysconfig_dirsrv) + log.info('Adding it') + cmdline = 'echo "ulimit -c unlimited" >> %s' % sysconfig_dirsrv + + sysconfig_dirsrv_systemd = sysconfig_dirsrv + ".systemd" + cmdline = 'egrep LimitCORE=infinity %s' % sysconfig_dirsrv_systemd + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore == "": + log.info('No LimitCORE in %s' % sysconfig_dirsrv_systemd) + log.info('Adding it') + cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd + + topology.standalone.restart(timeout=10) + + ldif_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket48005.ldif" + os.system('ls %s' % ldif_file) + os.system('rm -f %s' % ldif_file) + if hasattr(topology.standalone, 'prefix'): + prefix = topology.standalone.prefix + else: + prefix = None + dbgen_prog = prefix + '/bin/dbgen.pl' + log.info('dbgen_prog: %s' % dbgen_prog) + os.system('%s -s %s -o %s -u -n 10000' % (dbgen_prog, SUFFIX, ldif_file)) + cmdline = 'egrep dn: %s | wc -l' % ldif_file + p = os.popen(cmdline, "r") + dnnumstr = p.readline() + num = int(dnnumstr) + log.info("We have %d entries.\n", num) + + importTask = Tasks(topology.standalone) + args = {TASK_WAIT: True} + importTask.importLDIF(SUFFIX, None, ldif_file, args) + log.info('Importing %s complete.' % ldif_file) + + +def test_ticket48005_memberof(topology): + ''' + Enable memberof and referint plugin + Run fixmemberof task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. 
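+
+    The core check used by this and the following test cases boils down to
+    (sketch only; 'glob' is not imported by this file and just illustrates
+    the idea, the code below shells out to 'ls' and 'mv' instead):
+
+        logdir = re.sub('errors', '', topology.standalone.errlog)
+        assert glob.glob(logdir + 'core*') == []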
+    '''
+    log.info("Ticket 48005 memberof test...")
+    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+
+    topology.standalone.restart(timeout=10)
+
+    try:
+        # run the fixup task
+        topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False})
+    except ValueError:
+        log.error('Some problem occurred with a value that was provided')
+        assert False
+
+    topology.standalone.stop(timeout=10)
+
+    mytmp = topology.standalone.getDir(__file__, TMP_DIR)
+    logdir = re.sub('errors', '', topology.standalone.errlog)
+    cmdline = 'ls ' + logdir + 'core*'
+    p = os.popen(cmdline, "r")
+    lcore = p.readline()
+    if lcore != "":
+        os.system('mv %score* %s/core.ticket48005_memberof' % (logdir, mytmp))
+        log.error('FixMemberof: Moved core file(s) to %s; Test failed' % mytmp)
+        assert False
+    log.info('No core files are found')
+
+    topology.standalone.start(timeout=10)
+
+    topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+    topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+
+    topology.standalone.restart(timeout=10)
+
+    log.info("Ticket 48005 memberof test complete")
+
+
+def test_ticket48005_automember(topology):
+    '''
+    Enable automember and referint plugin
+    1. Run automember rebuild membership task without waiting
+    Shutdown the server
+    Check if a core file was generated or not
+    If no core was found, this test case was successful.
+    2. Run automember export updates task without waiting
+    Shutdown the server
+    Check if a core file was generated or not
+    If no core was found, this test case was successful.
+    3. Run automember map updates task without waiting
+    Shutdown the server
+    Check if a core file was generated or not
+    If no core was found, this test case was successful.
+    '''
+    log.info("Ticket 48005 automember test...")
+    topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+    topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+
+    # configure automember config entry
+    log.info('Adding automember config')
+    try:
+        topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
+                                  'objectclass': 'top autoMemberDefinition'.split(),
+                                  'autoMemberScope': 'dc=example,dc=com',
+                                  'autoMemberFilter': 'objectclass=inetorgperson',
+                                  'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com',
+                                  'autoMemberGroupingAttr': 'uniquemember:dn',
+                                  'cn': 'group cfg'})))
+    except ValueError:
+        log.error('Failed to add automember config')
+        assert False
+
+    topology.standalone.restart(timeout=10)
+
+    try:
+        # run the automember rebuild task
+        topology.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False})
+    except ValueError:
+        log.error('Automember rebuild task failed.')
+        assert False
+
+    topology.standalone.stop(timeout=10)
+
+    mytmp = topology.standalone.getDir(__file__, TMP_DIR)
+    logdir = re.sub('errors', '', topology.standalone.errlog)
+    cmdline = 'ls ' + logdir + 'core*'
+    p = os.popen(cmdline, "r")
+    lcore = p.readline()
+    if lcore != "":
+        os.system('mv %score* %s/core.ticket48005_automember_rebuild' % (logdir, mytmp))
+        log.error('Automember_rebuild: Moved core file(s) to %s; Test failed' % mytmp)
+        assert False
+    log.info('No core files are found')
+
+    topology.standalone.start(timeout=10)
+
+    ldif_out_file = mytmp + "/ticket48005_automember_exported.ldif"
+    try:
+        # run the automember export task
+        topology.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False})
+    except ValueError:
+        log.error('Automember Export task failed.')
+        assert False
+
+    topology.standalone.stop(timeout=10)
+
+    logdir = re.sub('errors', '', topology.standalone.errlog)
+    cmdline = 'ls ' + logdir + 'core*'
+    p = os.popen(cmdline, "r")
+    lcore = p.readline()
+    if lcore != "":
+        os.system('mv %score* %s/core.ticket48005_automember_export' % (logdir, mytmp))
+        log.error('Automember_export: Moved core file(s) to %s; Test failed' % mytmp)
+        assert False
+    log.info('No core files are found')
+
+    topology.standalone.start(timeout=10)
+
+    ldif_in_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket48005.ldif"
+    ldif_out_file = mytmp + "/ticket48005_automember_map.ldif"
+    try:
+        # run the automember map task
+        topology.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file, args={TASK_WAIT: False})
+    except ValueError:
+        log.error('Automember Map task failed.')
+        assert False
+
+    topology.standalone.stop(timeout=10)
+
+    logdir = re.sub('errors', '', topology.standalone.errlog)
+    cmdline = 'ls ' + logdir + 'core*'
+    p = os.popen(cmdline, "r")
+    lcore = p.readline()
+    if lcore != "":
+        os.system('mv %score* %s/core.ticket48005_automember_map' % (logdir, mytmp))
+        log.error('Automember_map: Moved core file(s) to %s; Test failed' % mytmp)
+        assert False
+    log.info('No core files are found')
+
+    topology.standalone.start(timeout=10)
+
+    topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+    topology.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER)
+
+    topology.standalone.restart(timeout=10)
+
+    log.info("Ticket 48005 automember test complete")
+
+
+def test_ticket48005_syntaxvalidate(topology):
+    '''
+    Run syntax validate task without waiting
+    Shutdown the server
+    Check if a core file was generated or not
+    If no core was found, this test case was
successful. + ''' + log.info("Ticket 48005 syntax validate test...") + + try: + # run the fixup task + topology.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology.standalone.stop(timeout=10) + + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + logdir = re.sub('errors', '', topology.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_syntaxvalidate' % (logdir, mytmp)) + log.error('SyntaxValidate: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology.standalone.start(timeout=10) + + log.info("Ticket 48005 syntax validate test complete") + + +def test_ticket48005_usn(topology): + ''' + Enable entryusn + Delete all user entries. + Run USN tombstone cleanup task + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. + ''' + log.info("Ticket 48005 usn test...") + topology.standalone.plugins.enable(name=PLUGIN_USN) + + topology.standalone.restart(timeout=10) + + try: + entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)") + if len(entries) == 0: + log.info("No user entries.") + else: + for i in range(len(entries)): + # log.info('Deleting %s' % entries[i].dn) + try: + topology.standalone.delete_s(entries[i].dn) + except ValueError: + log.error('delete_s %s failed.' % entries[i].dn) + assert False + except ValueError: + log.error('search_s failed.') + assert False + + try: + # run the usn tombstone cleanup + topology.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology.standalone.stop(timeout=10) + + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + logdir = re.sub('errors', '', topology.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_usn' % (logdir, mytmp)) + log.error('usnTombstoneCleanup: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology.standalone.start(timeout=10) + + topology.standalone.plugins.disable(name=PLUGIN_USN) + + topology.standalone.restart(timeout=10) + + log.info("Ticket 48005 usn test complete") + + +def test_ticket48005_schemareload(topology): + ''' + Run schema reload task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. 
+ ''' + log.info("Ticket 48005 schema reload test...") + + try: + # run the schema reload task + topology.standalone.tasks.schemaReload(args={TASK_WAIT: False}) + except ValueError: + log.error('Schema Reload task failed.') + assert False + + topology.standalone.stop(timeout=10) + + logdir = re.sub('errors', '', topology.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + s.system('mv %score* %s/core.ticket48005_schema_reload' % (logdir, mytmp)) + log.error('Schema reload: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology.standalone.start(timeout=10) + + log.info("Ticket 48005 schema reload test complete") + + +def test_ticket48005_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket48005_setup(topo) + test_ticket48005_memberof(topo) + test_ticket48005_automember(topo) + test_ticket48005_syntaxvalidate(topo) + test_ticket48005_usn(topo) + test_ticket48005_schemareload(topo) + test_ticket48005_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48013_test.py b/dirsrvtests/tests/tickets/ticket48013_test.py new file mode 100644 index 0000000..0ccdeba --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48013_test.py @@ -0,0 +1,134 @@ +import os +import sys +import time +import ldap +import logging +import pytest +import pyasn1 +import pyasn1_modules +import ldap,ldapurl +from ldap.ldapobject import SimpleLDAPObject +from ldap.syncrepl import SyncreplConsumer +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +class SyncObject(SimpleLDAPObject, SyncreplConsumer): + def __init__(self, uri): + # Init the ldap connection + SimpleLDAPObject.__init__(self, uri) + + def sync_search(self, test_cookie): + self.syncrepl_search('dc=example,dc=com', ldap.SCOPE_SUBTREE, + filterstr='(objectclass=*)', mode='refreshOnly', + cookie=test_cookie) + + def poll(self): + self.syncrepl_poll(all=1) + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48013(topology): + ''' + Content Synchonization: Test that invalid cookies are caught + ''' + + cookies = ('#', '##', 'a#a#a', 'a#a#1') + + # Enable dynamic plugins + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugin!' + e.message['desc']) + assert False + + # Enable retro changelog + topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # Enbale content sync plugin + topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) + + # Set everything up + ldap_url = ldapurl.LDAPUrl('ldap://localhost:31389') + ldap_connection = SyncObject(ldap_url.initializeUrl()) + + # Authenticate + try: + ldap_connection.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + print('Login to LDAP server failed: %s' % e.message['desc']) + assert False + + # Test invalid cookies + for invalid_cookie in cookies: + log.info('Testing cookie: %s' % invalid_cookie) + try: + ldap_connection.sync_search(invalid_cookie) + ldap_connection.poll() + log.fatal('Invalid cookie accepted!') + assert False + except Exception as e: + log.info('Invalid cookie correctly rejected: %s' % e.message['info']) + pass + + # Success + log.info('Test complete') + + +def test_ticket48013_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket48013(topo) + test_ticket48013_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48026_test.py b/dirsrvtests/tests/tickets/ticket48026_test.py new file mode 100644 index 0000000..f8d440f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48026_test.py @@ -0,0 +1,168 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48026(topology): + ''' + Test that multiple attribute uniqueness works correctly. + ''' + # Configure the plugin + inst = topology.standalone + inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + + try: + # This plugin enable / disable doesn't seem to create the nsslapd-pluginId correctly? + inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'), + (ldap.MOD_ADD, 'uniqueness-attribute-name', + 'mailAlternateAddress'), + ]) + except ldap.LDAPError as e: + log.fatal('test_ticket48026: Failed to configure plugin for "mail": error ' + e.message['desc']) + assert False + + inst.restart(timeout=30) + + # Add an entry + try: + inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'mail': 'user1@example.com', + 'mailAlternateAddress' : 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 1st entry(mail v mail) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 3rd entry(mail v mailAlternateAddress) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 4th entry(mailAlternateAddress v mail) incorrectly succeeded') + assert False + + log.info('Test complete') + + +def test_ticket48026_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket48026(topo) + test_ticket48026_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git 
a/dirsrvtests/tests/tickets/ticket48109_test.py b/dirsrvtests/tests/tickets/ticket48109_test.py new file mode 100644 index 0000000..e4091e0 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48109_test.py @@ -0,0 +1,394 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48109_0(topology): + ''' + Set SubStr lengths to cn=uid,cn=index,... 
+ objectClass: extensibleObject + nsIndexType: sub + nsSubStrBegin: 2 + nsSubStrEnd: 2 + ''' + log.info('Test case 0') + # add substr setting to UID_INDEX + try: + topology.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'objectClass', 'extensibleObject'), + (ldap.MOD_ADD, 'nsIndexType', 'sub'), + (ldap.MOD_ADD, 'nsSubStrBegin', '2'), + (ldap.MOD_ADD, 'nsSubStrEnd', '2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error ' + e.message['desc']) + assert False + + # restart the server to apply the indexing + topology.standalone.restart(timeout=10) + + # add a test user + UID = 'auser0' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'a user0', + 'sn': 'user0', + 'givenname': 'a', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc']) + assert False + + entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)') + assert len(entries) == 1 + + # restart the server to check the access log + topology.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=a*)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=a*)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=a* not found.') + assert False + else: + log.info('Entry uid=a* found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 0 - OK - substr index used') + + # clean up substr setting to UID_INDEX + try: + topology.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'objectClass', 'extensibleObject'), + (ldap.MOD_DELETE, 'nsIndexType', 'sub'), + (ldap.MOD_DELETE, 'nsSubStrBegin', '2'), + (ldap.MOD_DELETE, 'nsSubStrEnd', '2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error ' + e.message['desc']) + assert False + + +def test_ticket48109_1(topology): + ''' + Set SubStr lengths to cn=uid,cn=index,... 
+ nsIndexType: sub + nsMatchingRule: nsSubStrBegin=2 + nsMatchingRule: nsSubStrEnd=2 + ''' + log.info('Test case 1') + # add substr setting to UID_INDEX + try: + topology.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'nsIndexType', 'sub'), + (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'), + (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error ' + e.message['desc']) + assert False + + # restart the server to apply the indexing + topology.standalone.restart(timeout=10) + + # add a test user + UID = 'buser1' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'b user1', + 'sn': 'user1', + 'givenname': 'b', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc']) + assert False + + entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)') + assert len(entries) == 1 + + # restart the server to check the access log + topology.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=b*)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=*b)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=*b not found.') + assert False + else: + log.info('Entry uid=*b found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 1 - OK - substr index used') + + # clean up substr setting to UID_INDEX + try: + topology.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'nsIndexType', 'sub'), + (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=2'), + (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error ' + e.message['desc']) + assert False + + +def test_ticket48109_2(topology): + ''' + Set SubStr conflict formats/lengths to cn=uid,cn=index,... + objectClass: extensibleObject + nsIndexType: sub + nsMatchingRule: nsSubStrBegin=3 + nsMatchingRule: nsSubStrEnd=3 + nsSubStrBegin: 2 + nsSubStrEnd: 2 + nsSubStr{Begin,End} are honored. 
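+
+    Verification: the access log is grepped for the SRCH/RESULT lines of
+    (uid=c*) and (uid=*2) and the RESULT line must carry no notes=A/U flag,
+    i.e. these short substring filters must have been served by the substring
+    index built with the 2-character key lengths.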
+ ''' + log.info('Test case 2') + + # add substr setting to UID_INDEX + try: + topology.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'nsIndexType', 'sub'), + (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=3'), + (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=3'), + (ldap.MOD_ADD, 'objectClass', 'extensibleObject'), + (ldap.MOD_ADD, 'nsSubStrBegin', '2'), + (ldap.MOD_ADD, 'nsSubStrEnd', '2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error ' + e.message['desc']) + assert False + + # restart the server to apply the indexing + topology.standalone.restart(timeout=10) + + # add a test user + UID = 'cuser2' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'c user2', + 'sn': 'user2', + 'givenname': 'c', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc']) + assert False + + entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)') + assert len(entries) == 1 + + entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)') + assert len(entries) == 1 + + # restart the server to check the access log + topology.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=c*)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=c*)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=c* not found.') + assert False + else: + log.info('Entry uid=c* found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 2-1 - OK - correct substr index used') + + cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=*2)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=*2)" is not logged in ' + topology.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=*2 not found.') + assert False + else: + log.info('Entry uid=*2 found.') + regex = 
re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 2-2 - OK - correct substr index used') + + # clean up substr setting to UID_INDEX + try: + topology.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'nsIndexType', 'sub'), + (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=3'), + (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=3'), + (ldap.MOD_DELETE, 'objectClass', 'extensibleObject'), + (ldap.MOD_DELETE, 'nsSubStrBegin', '2'), + (ldap.MOD_DELETE, 'nsSubStrEnd', '2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error ' + e.message['desc']) + assert False + + log.info('Test complete') + + +def test_ticket48109_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket48109_0(topo) + test_ticket48109_1(topo) + test_ticket48109_2(topo) + test_ticket48109_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48170_test.py b/dirsrvtests/tests/tickets/ticket48170_test.py new file mode 100644 index 0000000..cc71e37 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48170_test.py @@ -0,0 +1,96 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48170(topology): + ''' + Attempt to add a nsIndexType wikth an invalid value: "eq,pres" + ''' + + INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + REJECTED = False + try: + topology.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', 'eq,pres')]) + except ldap.UNWILLING_TO_PERFORM: + log.info('Index update correctly rejected') + REJECTED = True + + if not REJECTED: + log.fatal('Invalid nsIndexType value was incorrectly accepted.') + assert False + + log.info('Test complete') + + +def test_ticket48170_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket48170(topo) + test_ticket48170_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48191_test.py b/dirsrvtests/tests/tickets/ticket48191_test.py new file mode 100644 index 0000000..000975a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48191_test.py @@ -0,0 +1,323 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl +from ldap.controls.simple import GetEffectiveRightsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +CONFIG_DN = 'cn=config' +MYSUFFIX = 'o=ticket48191.org' +MYSUFFIXBE = 'ticket48191' + +_MYLDIF = 'ticket48191.ldif' + +SEARCHFILTER = '(objectclass=*)' + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket48191_setup(topology): + """ + Import 20 entries + Set nsslapd-maxsimplepaged-per-conn in cn=config + If the val is negative, no limit. + If the value is 0, the simple paged results is disabled. + If the value is positive, the value is the max simple paged results requests per connection. + The setting has to be dynamic. + """ + log.info('Testing Ticket 48191 - Config parameter nsslapd-maxsimplepaged-per-conn') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket48191.org ######################\n") + + topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) + topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) + + topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") + + # get tmp dir + mytmp = topology.standalone.getDir(__file__, TMP_DIR) + if mytmp is None: + mytmp = "/tmp" + + MYLDIF = '%s%s' % (mytmp, _MYLDIF) + os.system('ls %s' % MYLDIF) + os.system('rm -f %s' % MYLDIF) + if hasattr(topology.standalone, 'prefix'): + prefix = topology.standalone.prefix + else: + prefix = None + dbgen_prog = prefix + '/bin/dbgen.pl' + topology.standalone.log.info('dbgen_prog: %s' % dbgen_prog) + os.system('%s -s %s -o %s -n 14' % (dbgen_prog, MYSUFFIX, MYLDIF)) + cmdline = 'egrep dn: %s | wc -l' % MYLDIF + p = os.popen(cmdline, "r") + dnnumstr = p.readline() + global dnnum + dnnum = int(dnnumstr) + topology.standalone.log.info("We have %d entries.\n", dnnum) + + topology.standalone.log.info("\n\n######################### Import Test data ######################\n") + + args = {TASK_WAIT: True} + importTask = Tasks(topology.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) + + topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + global entries + entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology.standalone.log.info("Returned %d entries.\n", len(entries)) + + #print entries + + assert dnnum == len(entries) + + topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) + + +def test_ticket48191_run_0(topology): + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (no nsslapd-maxsimplepaged-per-conn) ######################\n") + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + pageddncnt = 0 + pages = 0 + while True: + pages += 1 + + topology.standalone.log.info("Getting page %d" % pages) + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + topology.standalone.log.info("%d results" % len(rdata)) + pageddncnt += len(rdata) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + + pctrls = [ + c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType + ] + if not pctrls: + topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') + break + + if pctrls[0].cookie: + spr_req_ctrl.cookie = pctrls[0].cookie + topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + else: + topology.standalone.log.info("No cookie") + break + + topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) + + global dnnum + global entries + assert dnnum == len(entries) + assert pages == (dnnum / page_size) + + +def test_ticket48191_run_1(topology): + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (nsslapd-maxsimplepaged-per-conn: 0) ######################\n") + + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '0')]) + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + + topology.standalone.log.fatal('Unexpected success') + try: + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + except ldap.UNWILLING_TO_PERFORM as e: + topology.standalone.log.info('Returned the expected RC UNWILLING_TO_PERFORM') + return + except ldap.LDAPError as e: + topology.standalone.log.fatal('Unexpected error: ' + e.message['desc']) + assert False + topology.standalone.log.info("Type %d" % rtype) + topology.standalone.log.info("%d results" % len(rdata)) + assert False + + +def test_ticket48191_run_2(topology): + topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (nsslapd-maxsimplepaged-per-conn: 1000) ######################\n") + + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '1000')]) + + page_size = 4 + spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + known_ldap_resp_ctrls = { + SimplePagedResultsControl.controlType: SimplePagedResultsControl, + } + + 
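+    # Same RFC 2696 loop as in run_0 above: issue an async search carrying the
+    # paged results control, read the cookie returned in the response control,
+    # and re-issue the search until the server hands back an empty cookie.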
topology.standalone.log.info("Calling search_ext...") + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + pageddncnt = 0 + pages = 0 + while True: + pages += 1 + + topology.standalone.log.info("Getting page %d" % pages) + rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) + topology.standalone.log.info("%d results" % len(rdata)) + pageddncnt += len(rdata) + + topology.standalone.log.info("Results:") + for dn, attrs in rdata: + topology.standalone.log.info("dn: %s" % dn) + + pctrls = [ + c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType + ] + if not pctrls: + topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') + break + + if pctrls[0].cookie: + spr_req_ctrl.cookie = pctrls[0].cookie + topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) + msgid = topology.standalone.search_ext(MYSUFFIX, + ldap.SCOPE_SUBTREE, + SEARCHFILTER, + ['cn'], + serverctrls=[spr_req_ctrl]) + else: + topology.standalone.log.info("No cookie") + break + + topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) + + global dnnum + global entries + assert dnnum == len(entries) + assert pages == (dnnum / page_size) + + topology.standalone.log.info("ticket48191 was successfully verified.") + + +def test_ticket48191_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48191_setup(topo) + test_ticket48191_run_0(topo) + test_ticket48191_run_1(topo) + test_ticket48191_run_2(topo) + test_ticket48191_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py b/dirsrvtests/tests/tickets/ticket48194_test.py new file mode 100644 index 0000000..17e179a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48194_test.py @@ -0,0 +1,499 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import subprocess +import time +import ldap +import logging +import pytest +import shutil +from lib389 import DirSrv, Entry, tools +from lib389 import DirSrvTools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +CONFIG_DN = 'cn=config' +ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +RSA = 'RSA' +RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) +LDAPSPORT = '10636' +SERVERCERT = 'Server-Cert' +plus_all_ecount = 0 +plus_all_dcount = 0 +plus_all_ecount_noweak = 0 +plus_all_dcount_noweak = 0 + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. 
+ ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def _header(topology, label): + topology.standalone.log.info("\n\n###############################################") + topology.standalone.log.info("####### %s" % label) + topology.standalone.log.info("###############################################") + + +def test_ticket48194_init(topology): + """ + Generate self signed cert and import it to the DS cert db. + Enable SSL + """ + _header(topology, 'Testing Ticket 48194 - harden the list of ciphers available by default') + + conf_dir = topology.standalone.confdir + + log.info("\n######################### Checking existing certs ######################\n") + os.system('certutil -L -d %s -n "CA certificate"' % conf_dir) + os.system('certutil -L -d %s -n "%s"' % (conf_dir, SERVERCERT)) + + log.info("\n######################### Create a password file ######################\n") + pwdfile = '%s/pwdfile.txt' % (conf_dir) + opasswd = os.popen("(ps -ef ; w ) | sha1sum | awk '{print $1}'", "r") + passwd = opasswd.readline() + pwdfd = open(pwdfile, "w") + pwdfd.write(passwd) + pwdfd.close() + + log.info("\n######################### Create a noise file ######################\n") + noisefile = '%s/noise.txt' % (conf_dir) + noise = os.popen("(w ; ps -ef ; date ) | sha1sum | awk '{print $1}'", "r") + noisewdfd = open(noisefile, "w") + noisewdfd.write(noise.readline()) + noisewdfd.close() + + log.info("\n######################### Create key3.db and cert8.db database ######################\n") + os.system("ls %s" % pwdfile) + os.system("cat %s" % pwdfile) + os.system('certutil -N -d %s -f %s' % (conf_dir, pwdfile)) + + log.info("\n######################### Creating encryption key for CA ######################\n") + os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile)) + + log.info("\n######################### Creating self-signed CA certificate ######################\n") + os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile)) + + log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n") + cafile = '%s/cacert.asc' % conf_dir + catxt = os.popen('certutil -L -d %s -n "CA certificate" -a' % conf_dir) + cafd = open(cafile, "w") + while True: + line = catxt.readline() + if (line == ''): + break + cafd.write(line) + cafd.close() + + log.info("\n######################### Generate the server certificate ######################\n") + ohostname = os.popen('hostname --fqdn', "r") + myhostname = ohostname.readline() + os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA 
certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile)) + + log.info("\n######################### create the pin file ######################\n") + pinfile = '%s/pin.txt' % (conf_dir) + pintxt = 'Internal (Software) Token:%s' % passwd + pinfd = open(pinfile, "w") + pinfd.write(pintxt) + pinfd.close() + + log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), + (ldap.MOD_REPLACE, 'nsTLS1', 'on'), + (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'), + (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'), + (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')]) + + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'), + (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'), + (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)]) + + topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), + 'cn': RSA, + 'nsSSLPersonalitySSL': SERVERCERT, + 'nsSSLToken': 'internal (software)', + 'nsSSLActivation': 'on'}))) + +def connectWithOpenssl(topology, cipher, expect): + """ + Connect with the given cipher + Condition: + If expect is True, the handshake should be successful. + If expect is False, the handshake should be refused with + access log: "Cannot communicate securely with peer: + no common encryption algorithm(s)." + """ + log.info("Testing %s -- expect to handshake %s", cipher,"successfully" if expect else "failed") + + myurl = 'localhost:%s' % LDAPSPORT + cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher] + + strcmdline = '/usr/bin/openssl s_client -connect localhost:%s -cipher %s' % (LDAPSPORT, cipher) + log.info("Running cmdline: %s", strcmdline) + + try: + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) + except ValueError: + log.info("%s failed: %s", cmdline, ValueError) + proc.kill() + + while True: + l = proc.stdout.readline() + if l == "": + break + if 'Cipher is' in l: + log.info("Found: %s", l) + if expect: + if '(NONE)' in l: + assert False + else: + proc.stdin.close() + assert True + else: + if '(NONE)' in l: + assert True + else: + proc.stdin.close() + assert False + +def test_ticket48194_run_0(topology): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: allowWeakCipher: on + """ + _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.restart(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', True) + connectWithOpenssl(topology, 'AES256-SHA256', True) + +def test_ticket48194_run_1(topology): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: default allowWeakCipher (i.e., off) for +all + """ + _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) + # Make sure allowWeakCipher is not set. 
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_0' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', True) + +def test_ticket48194_run_2(topology): + """ + Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha + rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_1' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', False) + connectWithOpenssl(topology, 'AES128-SHA', True) + connectWithOpenssl(topology, 'AES256-SHA', True) + +def test_ticket48194_run_3(topology): + """ + Check nsSSL3Ciphers: -all + All ciphers are disabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_2' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', False) + +def test_ticket48194_run_4(topology): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_3' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', True) + +def test_ticket48194_run_5(topology): + """ + Check nsSSL3Ciphers: default + Default ciphers are enabled. 
+ default allowWeakCipher + """ + _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_4' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', True) + +def test_ticket48194_run_6(topology): + """ + Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 + All ciphers are disabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_5' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', False) + connectWithOpenssl(topology, 'AES128-SHA', True) + +def test_ticket48194_run_7(topology): + """ + Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 + All ciphers are disabled. + default allowWeakCipher + """ + _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_6' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', False) + connectWithOpenssl(topology, 'RC4-MD5', True) + +def test_ticket48194_run_8(topology): + """ + Check nsSSL3Ciphers: default + allowWeakCipher: off + Strong Default ciphers are enabled. + """ + _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'), + (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_7' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', True) + +def test_ticket48194_run_9(topology): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. 
+ allowWeakCipher: on + nsslapd-errorlog-level: 0 + """ + _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), + (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')]) + topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_8' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', True) + connectWithOpenssl(topology, 'AES256-SHA256', True) + +def test_ticket48194_run_10(topology): + """ + Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5, + +TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + +TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA, + +TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA, + +TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA, + -SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5, + -SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5, + -SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5 + allowWeakCipher: on + nsslapd-errorlog-level: 0 + """ + _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', + '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_9' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'RC4-MD5', True) + connectWithOpenssl(topology, 'AES256-SHA256', False) + +def test_ticket48194_run_11(topology): + """ + Check nsSSL3Ciphers: +fortezza + SSL_GetImplementedCiphers does not return this as a secuire cipher suite + """ + _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') + + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')]) + + log.info("\n######################### Restarting the server ######################\n") + topology.standalone.stop(timeout=10) + os.system('mv %s %s.48194_10' % (topology.standalone.errlog, topology.standalone.errlog)) + os.system('touch %s' % (topology.standalone.errlog)) + topology.standalone.start(timeout=120) + + 
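+    # 'fortezza' is not a supported cipher family, so no usable suite should
+    # remain enabled and both handshakes below are expected to be refused.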
connectWithOpenssl(topology, 'RC4-SHA', False) + connectWithOpenssl(topology, 'AES256-SHA256', False) + +def test_ticket48194_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48194_init(topo) + + test_ticket48194_run_0(topo) + test_ticket48194_run_1(topo) + test_ticket48194_run_2(topo) + test_ticket48194_run_3(topo) + test_ticket48194_run_4(topo) + test_ticket48194_run_5(topo) + test_ticket48194_run_6(topo) + test_ticket48194_run_7(topo) + test_ticket48194_run_8(topo) + test_ticket48194_run_9(topo) + test_ticket48194_run_10(topo) + test_ticket48194_run_11(topo) + + test_ticket48194_final(topo) + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py new file mode 100644 index 0000000..c3c8c8f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48212_test.py @@ -0,0 +1,210 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +MYSUFFIX = 'dc=example,dc=com' +MYSUFFIXBE = 'userRoot' +_MYLDIF = 'example1k_posix.ldif' +UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config" + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + +def runDbVerify(topology): + topology.standalone.log.info("\n\n +++++ dbverify +++++\n") + dbverifyCMD = topology.standalone.sroot + "/slapd-" + topology.standalone.inst + "/dbverify -V" + dbverifyOUT = os.popen(dbverifyCMD, "r") + topology.standalone.log.info("Running %s" % dbverifyCMD) + running = True + error = False + while running: + l = dbverifyOUT.readline() + if l == "": + running = False + elif "libdb:" in l: + running = False + error = True + topology.standalone.log.info("%s" % l) + elif "verify failed" in l: + error = True + running = False + topology.standalone.log.info("%s" % l) + + if error: + topology.standalone.log.fatal("dbverify failed") + assert False + else: + topology.standalone.log.info("dbverify passed") + +def reindexUidNumber(topology): + topology.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n") + indexCMD = topology.standalone.sroot + "/slapd-" + topology.standalone.inst + "/db2index.pl -D \"" + DN_DM + "\" -w \"" + PASSWORD + "\" -n " + MYSUFFIXBE + " -t uidnumber" + + indexOUT = os.popen(indexCMD, "r") + topology.standalone.log.info("Running %s" % indexCMD) + + time.sleep(10) + + tailCMD = "tail -n 3 " + topology.standalone.errlog + tailOUT = os.popen(tailCMD, "r") + running = True + done = False + while running: + l = tailOUT.readline() + if l == "": + running = False + elif "Finished indexing" in l: + running = False + done = True + topology.standalone.log.info("%s" % l) + + if done: + topology.standalone.log.info("%s done" % indexCMD) + else: + topology.standalone.log.fatal("%s did not finish" % indexCMD) + assert False + +def test_ticket48212_run(topology): + """ + Import posixAccount entries. + Index uidNumber + add nsMatchingRule: integerOrderingMatch + run dbverify to see if it reports the db corruption or not + delete nsMatchingRule: integerOrderingMatch + run dbverify to see if it reports the db corruption or not + if no corruption is reported, the bug fix was verified. + """ + log.info('Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + + data_dir_path = topology.standalone.getDir(__file__, DATA_DIR) + ldif_file = data_dir_path + "ticket48212/" + _MYLDIF + topology.standalone.log.info("\n\n######################### Import Test data (%s) ######################\n" % ldif_file) + args = {TASK_WAIT: True} + importTask = Tasks(topology.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args) + args = {TASK_WAIT: True} + + runDbVerify(topology) + + topology.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n") + try: + topology.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(), + 'cn': 'uidnumber', + 'nsSystemIndex': 'false', + 'nsIndexType': "pres eq".split()}))) + except ValueError: + topology.standalone.log.fatal("add_s failed: %s", ValueError) + + topology.standalone.log.info("\n\n######################### reindexing... 
######################\n") + reindexUidNumber(topology) + + runDbVerify(topology) + + topology.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n") + try: + topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', 'integerOrderingMatch')]) + except ValueError: + topology.standalone.log.fatal("modify_s failed: %s", ValueError) + + topology.standalone.log.info("\n\n######################### reindexing... ######################\n") + reindexUidNumber(topology) + + runDbVerify(topology) + + topology.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n") + try: + topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', 'integerOrderingMatch')]) + except ValueError: + topology.standalone.log.fatal("modify_s failed: %s", ValueError) + + reindexUidNumber(topology) + + runDbVerify(topology) + + topology.standalone.log.info("ticket48212 was successfully verified.") + + +def test_ticket48212_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48212_run(topo) + + test_ticket48212_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48214_test.py b/dirsrvtests/tests/tickets/ticket48214_test.py new file mode 100644 index 0000000..14bf392 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48214_test.py @@ -0,0 +1,171 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from ldap.controls import SimplePagedResultsControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +MYSUFFIX = 'dc=example,dc=com' +MYSUFFIXBE = 'userRoot' + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + +def getMaxBerSizeFromDseLdif(topology): + topology.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n") + dse_ldif = topology.standalone.confdir + '/dse.ldif' + grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif + topology.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD) + grepMaxBerOUT = os.popen(grepMaxBerCMD, "r") + running = True + maxbersize = -1 + while running: + l = grepMaxBerOUT.readline() + if l == "": + topology.standalone.log.info(" Empty: %s\n" % l) + running = False + elif "nsslapd-maxbersize:" in l.lower(): + running = False + fields = l.split() + if len(fields) >= 2: + maxbersize = fields[1] + topology.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1])) + else: + topology.standalone.log.info(" Wrong format - %s\n" % l) + else: + topology.standalone.log.info(" Else?: %s\n" % l) + return maxbersize + +def checkMaxBerSize(topology): + topology.standalone.log.info(" +++++ Check Max Ber Size +++++\n") + maxbersizestr = getMaxBerSizeFromDseLdif(topology) + maxbersize = int(maxbersizestr) + isdefault = True + defaultvalue = 2097152 + if maxbersize < 0: + topology.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n") + elif maxbersize == 0: + topology.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) + else: + isdefault = False + topology.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) + + try: + entry = topology.standalone.search_s('cn=config', ldap.SCOPE_BASE, + "(cn=*)", + ['nsslapd-maxbersize']) + if entry: + searchedsize = entry[0].getValue('nsslapd-maxbersize') + topology.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize) + else: + topology.standalone.log.fatal('ERROR: cn=config is not found?') + assert False + except ldap.LDAPError as e: + topology.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc']) + assert False + + if isdefault: + topology.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue)) + assert int(searchedsize) == defaultvalue + + +def test_ticket48214_run(topology): + """ + Check ldapsearch returns the correct maxbersize when it is not explicitly set. + """ + log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value') + + # bind as directory manager + topology.standalone.log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology.standalone.log.info("\n\n######################### Out of Box ######################\n") + checkMaxBerSize(topology) + + topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n") + topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '0')]) + checkMaxBerSize(topology) + + topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n") + topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '10000')]) + checkMaxBerSize(topology) + + topology.standalone.log.info("ticket48214 was successfully verified.") + + +def test_ticket48214_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
+ To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48214_run(topo) + + test_ticket48214_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48226_test.py b/dirsrvtests/tests/tickets/ticket48226_test.py new file mode 100644 index 0000000..6e244af --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48226_test.py @@ -0,0 +1,249 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyReplication(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + os.environ['USE_VALGRIND'] = '1' + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + + # Creating master 2... 
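+    # Second supplier: same allocate/create/open sequence as master 1, but with
+    # the MASTER_2 host, port, server id and its own replica id.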
+ master2 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... 
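+    # testReplication() pushes a test update through master1 and waits for it
+    # to show up on master2 before the fixture is handed to the tests.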
+ if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + def fin(): + master1.delete() + master2.delete() + sbin_dir = get_sbin_dir(prefix=master2.prefix) + valgrind_disable(sbin_dir) + request.addfinalizer(fin) + + return TopologyReplication(master1, master2) + + +def test_ticket48226_set_purgedelay(topology): + args = {REPLICA_PURGE_DELAY: '5', + REPLICA_PURGE_INTERVAL: '5'} + try: + topology.master1.replica.setProperties(DEFAULT_SUFFIX, None, None, args) + except: + log.fatal('Failed to configure replica') + assert False + try: + topology.master2.replica.setProperties(DEFAULT_SUFFIX, None, None, args) + except: + log.fatal('Failed to configure replica') + assert False + topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) + topology.master2.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) + topology.master1.restart(10) + topology.master2.restart(10) + + +def test_ticket48226_1(topology): + name = 'test_entry' + dn = "cn=%s,%s" % (name, SUFFIX) + + topology.master1.add_s(Entry((dn, {'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + # First do an update that is replicated + mods = [(ldap.MOD_ADD, 'description', '5')] + topology.master1.modify_s(dn, mods) + + nbtry = 0 + while (nbtry <= 10): + try: + ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + if ent.hasAttr('description') and ent.getValue('description') == '5': + break + except ldap.NO_SUCH_OBJECT: + pass + nbtry = nbtry + 1 + time.sleep(1) + assert nbtry <= 10 + + # Stop M2 so that it will not receive the next update + topology.master2.stop(10) + + # ADD a new value that is not replicated + mods = [(ldap.MOD_DELETE, 'description', '5')] + topology.master1.modify_s(dn, mods) + + # Stop M1 so that it will keep del '5' that is unknown from master2 + topology.master1.stop(10) + + # Get the sbin directory so we know where to replace 'ns-slapd' + sbin_dir = get_sbin_dir(prefix=topology.master2.prefix) + + # Enable valgrind + valgrind_enable(sbin_dir) + + # start M2 to do the next updates + topology.master2.start(60) + + # ADD 'description' by '5' + mods = [(ldap.MOD_DELETE, 'description', '5')] + topology.master2.modify_s(dn, mods) + + # DEL 'description' by '5' + mods = [(ldap.MOD_ADD, 'description', '5')] + topology.master2.modify_s(dn, mods) + + # sleep of purgedelay so that the next update will purge the CSN_7 + time.sleep(6) + + # ADD 'description' by '6' that purge the state info + mods = [(ldap.MOD_ADD, 'description', '6')] + topology.master2.modify_s(dn, mods) + + # Restart master1 + topology.master1.start(10) + + # Get the results file + results_file = valgrind_get_results_file(topology.master2) + + # Stop master2 + topology.master2.stop(10) + + # Check for leak + if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'csnset_dup'): + log.info('Valgrind reported leak in csnset_dup!') + assert False + else: + log.info('Valgrind is happy!') + + # Check for invalid read/write + if valgrind_check_file(results_file, VALGRIND_INVALID_STR, 'csnset_dup'): + log.info('Valgrind reported invalid!') + assert False + else: + log.info('Valgrind is happy!') + + # Check for invalid read/write + if valgrind_check_file(results_file, VALGRIND_INVALID_STR, 'csnset_free'): + log.info('Valgrind reported invalid!') + assert False + else: + 
log.info('Valgrind is happy!') + + topology.master1.start(10) + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py new file mode 100644 index 0000000..bb20620 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48228_test.py @@ -0,0 +1,336 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( +SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX +SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX +SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER +SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX +SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER +SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. + ''' + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: + standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def set_global_pwpolicy(topology, inhistory): + log.info(" +++++ Enable global password policy +++++\n") + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + # Enable password policy + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + log.info(" Set global password history on\n") + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to set passwordHistory: error ' + e.message['desc']) + assert False + + log.info(" Set global passwords in history\n") + try: + count = "%d" % inhistory + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count)]) + except ldap.LDAPError as e: + log.error('Failed to set passwordInHistory: error ' + e.message['desc']) + assert False + + +def set_subtree_pwpolicy(topology): + log.info(" +++++ Enable subtree level password policy +++++\n") + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + log.info(" Add the container") + try: + topology.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer'}))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container: error ' + e.message['desc']) + assert False + + log.info(" Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}") + try: + topology.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': SUBTREE_PWPDN, + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'on', + 'passwordInHistory': '6', + 'passwordMinAge': '0', + 'passwordChange': 'on', + 'passwordStorageScheme': 'clear'}))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + assert False + + log.info(" Add the COS template") + try: + topology.standalone.add_s(Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': SUBTREE_PWPDN, + 'cosPriority': '1', + 'cn': SUBTREE_COS_TMPLDN, + 'pwdpolicysubentry': SUBTREE_PWP}))) + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.message['desc']) + assert False + + log.info(" Add the COS definition") + try: + topology.standalone.add_s(Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': SUBTREE_PWPDN, + 'costemplatedn': SUBTREE_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.message['desc']) + assert False + + +def check_passwd_inhistory(topology, user, cpw, passwd): + inhistory = 0 + log.info(" Bind as {%s,%s}" % (user, cpw)) + topology.standalone.simple_bind_s(user, cpw) + try: + topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) + except ldap.LDAPError as e: + log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error ' + e.message['desc']) + inhistory = 1 + return inhistory + + +def update_passwd(topology, user, passwd, times): + cpw = passwd + loop = 0 + while loop < times: + log.info(" Bind as {%s,%s}" % (user, cpw)) + 
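+        # Bind as the user itself so each modify is a self-change and therefore
+        # subject to the password policy history check, not an admin reset.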
topology.standalone.simple_bind_s(user, cpw) + cpw = 'password%d' % loop + try: + topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)]) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message['desc']) + assert False + loop += 1 + + # checking the first password, which is supposed to be in history + inhistory = check_passwd_inhistory(topology, user, cpw, passwd) + assert inhistory == 1 + + +def test_ticket48228_test_global_policy(topology): + """ + Check global password policy + """ + + log.info(' Set inhistory = 6') + set_global_pwpolicy(topology, 6) + + log.info(' Bind as directory manager') + log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info(' Add an entry' + USER1_DN) + try: + topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'givenname': 'user', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + log.info(' Update the password of ' + USER1_DN + ' 6 times') + update_passwd(topology, USER1_DN, 'password', 6) + + log.info(' Set inhistory = 4') + set_global_pwpolicy(topology, 4) + + log.info(' checking the first password, which is supposed NOT to be in history any more') + cpw = 'password%d' % 5 + tpw = 'password' + inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 0 + inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 1 + inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the third password, which is supposed to be in history') + cpw = tpw + tpw = 'password%d' % 2 + inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) + assert inhistory == 1 + + log.info("Global policy was successfully verified.") + + +def test_ticket48228_test_subtree_policy(topology): + """ + Check subtree level password policy + """ + + log.info(' Set inhistory = 6') + set_subtree_pwpolicy(topology) + + log.info(' Bind as directory manager') + log.info("Bind as %s" % DN_DM) + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info(' Add an entry' + USER2_DN) + try: + topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'givenname': 'user', + 'mail': 'user2@example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) + assert False + + log.info(' Update the password of ' + USER2_DN + ' 6 times') + update_passwd(topology, USER2_DN, 'password', 6) + + log.info(' Set inhistory = 4') + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topology.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', '4')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + log.info(' 
checking the first password, which is supposed NOT to be in history any more') + cpw = 'password%d' % 5 + tpw = 'password' + inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 0 + inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 1 + inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the third password, which is supposed to be in history') + cpw = tpw + tpw = 'password%d' % 2 + inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) + assert inhistory == 1 + + log.info("Subtree level policy was successfully verified.") + + +def test_ticket48228_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + log.info('Testing Ticket 48228 - wrong password check if passwordInHistory is decreased') + + test_ticket48228_test_global_policy(topo) + + test_ticket48228_test_subtree_policy(topo) + + test_ticket48228_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48233_test.py b/dirsrvtests/tests/tickets/ticket48233_test.py new file mode 100644 index 0000000..d9b0aae --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48233_test.py @@ -0,0 +1,105 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
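+    # Standard lib389 setup: allocate the instance from args_instance,
+    # remove any leftover instance with the same server ID, then create
+    # and open a fresh one for this module.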
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48233(topology): + """Test that ACI's that use IP restrictions do not crash the server at + shutdown + """ + + # Add aci to restrict access my ip + aci_text = ('(targetattr != "userPassword")(version 3.0;acl ' + + '"Enable anonymous access - IP"; allow (read,compare,search)' + + '(userdn = "ldap:///anyone") and (ip="127.0.0.1");)') + + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)]) + except ldap.LDAPError as e: + log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc'])) + assert False + time.sleep(1) + + # Anonymous search to engage the aci + try: + topology.standalone.simple_bind_s("", "") + except ldap.LDAPError as e: + log.error('Failed to anonymously bind -error %s' % (e.message['desc'])) + assert False + + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*') + if not entries: + log.fatal('Failed return an entries from search') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + # Restart the server + topology.standalone.restart(timeout=10) + + # Check for crash + if topology.standalone.detectDisorderlyShutdown(): + log.fatal('Server crashed!') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py new file mode 100644 index 0000000..5970d70 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48252_test.py @@ -0,0 +1,178 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * + +log = logging.getLogger(__name__) + +installation_prefix = None + +# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( +USER_NUM = 10 +TEST_USER = "test_user" + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + ''' + This fixture is used to standalone topology for the 'module'. 
+    '''
+    global installation_prefix
+
+    if installation_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+    standalone = DirSrv(verbose=False)
+
+    # Args for the standalone instance
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+
+    # Get the status of the instance and restart it if it exists
+    instance_standalone = standalone.exists()
+
+    # Remove the instance
+    if instance_standalone:
+        standalone.delete()
+
+    # Create the instance
+    standalone.create()
+
+    # Used to retrieve configuration information (dbdir, confdir...)
+    standalone.open()
+
+    # clear the tmp directory
+    standalone.clearTmpDir(__file__)
+
+    # Here we have standalone instance up and running
+    return TopologyStandalone(standalone)
+
+
+def test_ticket48252_setup(topology):
+    """
+    Enable the USN plug-in (so that tombstone entries are created)
+    Add test entries
+    """
+
+    log.info("Enable the USN plugin...")
+    try:
+        topology.standalone.plugins.enable(name=PLUGIN_USN)
+    except ldap.LDAPError as e:
+        log.error("Failed to enable USN Plugin: error " + e.message['desc'])
+        assert False
+
+    log.info("Adding test entries...")
+    for id in range(USER_NUM):
+        name = "%s%d" % (TEST_USER, id)
+        topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+                                         'objectclass': "top person".split(),
+                                         'sn': name,
+                                         'cn': name})))
+
+
+def in_index_file(topology, id, index):
+    # dbscan dumps the raw contents of the given index file; the key is
+    # present in the output if the attribute value is still indexed.
+    key = "%s%s" % (TEST_USER, id)
+    log.info("  dbscan - checking %s is in index file %s..." % (key, index))
+    dbscanOut = topology.standalone.dbscan(DEFAULT_BENAME, index)
+
+    if key in dbscanOut:
+        found = True
+        topology.standalone.log.info("Found key %s in dbscan output" % key)
+    else:
+        found = False
+        topology.standalone.log.info("Did not find key %s in dbscan output" % key)
+
+    return found
+
+
+def test_ticket48252_run_0(topology):
+    """
+    Delete the entry cn=test_user0
+    Check it is not in the 'cn' index file
+    """
+    log.info("Case 1 - Check deleted entry is not in the 'cn' index file")
+    del_rdn = "cn=%s0" % TEST_USER
+    del_entry = "%s,%s" % (del_rdn, SUFFIX)
+    log.info("  Deleting a test entry %s..." % del_entry)
+    topology.standalone.delete_s(del_entry)
+
+    assert in_index_file(topology, 0, 'cn') == False
+
+    log.info("  db2index - reindexing %s ..." % 'cn')
+    assert topology.standalone.db2index(DEFAULT_BENAME, 'cn')
+
+    assert in_index_file(topology, 0, 'cn') == False
+    log.info("  entry %s is not in the cn index file after reindexing." % del_entry)
+    log.info('Case 1 - PASSED')
+
+
+def test_ticket48252_run_1(topology):
+    """
+    Delete the entry cn=test_user1
+    Check it is in the 'objectclass' index file as a tombstone entry
+    """
+    log.info("Case 2 - Check deleted entry is in the 'objectclass' index file as a tombstone entry")
+    del_rdn = "cn=%s1" % TEST_USER
+    del_entry = "%s,%s" % (del_rdn, SUFFIX)
+    log.info("  Deleting a test entry %s..." % del_entry)
+    topology.standalone.delete_s(del_entry)
+
+    entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
+    assert len(entry) == 1
+    log.info("  entry %s is in the objectclass index file." % del_entry)
+
+    log.info("  db2index - reindexing %s ..."
% 'objectclass') + assert topology.standalone.db2index(DEFAULT_BENAME, 'objectclass') + + entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) + assert len(entry) == 1 + log.info(" entry %s is in the objectclass index file after reindexed." % del_entry) + log.info('Case 2 - PASSED') + +def test_ticket48252_final(topology): + topology.standalone.delete() + log.info('Testing Ticket 48252 - PASSED.') + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + log.info('Testing Ticket 48252 - db2index creates index entry from deleted records') + + test_ticket48252_setup(topo) + + test_ticket48252_run_0(topo) + test_ticket48252_run_1(topo) + + test_ticket48252_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/dirsrvtests/tests/tickets/ticket48265_test.py b/dirsrvtests/tests/tickets/ticket48265_test.py new file mode 100644 index 0000000..fb695c5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48265_test.py @@ -0,0 +1,130 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import threading +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +USER_NUM = 20 +TEST_USER = 'test_user' + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + return TopologyStandalone(standalone) + + +def test_ticket48265_test(topology): + """ + Complex filter issues + Ticket 47521 type complex filter: + (&(|(uid=tuser*)(cn=Test user*))(&(givenname=test*3))(mail=tuser@example.com)(&(description=*))) + Ticket 48264 type complex filter: + (&(&(|(l=EU)(l=AP)(l=NA))(|(c=SE)(c=DE)))(|(uid=*test*)(cn=*test*))(l=eu)) + """ + + log.info("Adding %d test entries..." 
% USER_NUM) + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX) + topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': name, + 'cn': name, + 'uid': name, + 'givenname': 'test', + 'mail': mail, + 'description': 'description', + 'secretary': secretary, + 'l': 'MV', + 'title': 'Engineer'}))) + + log.info("Search with Ticket 47521 type complex filter") + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % (TEST_USER, TEST_USER, mail) + entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521) + assert len(entry) == 1 + + log.info("Search with Ticket 48265 type complex filter") + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % (name, mail) + entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265) + assert len(entry) == 1 + + log.info('Test 48265 complete\n') + + +def test_ticket48265_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + log.info('Testing Ticket 48265 - Complex filter in a search request does not work as expected') + + test_ticket48265_test(topo) + + test_ticket48265_final(topo) + + +if __name__ == '__main__': + run_isolated() diff --git a/dirsrvtests/tests/tickets/ticket48312_test.py b/dirsrvtests/tests/tickets/ticket48312_test.py new file mode 100644 index 0000000..0989279 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48312_test.py @@ -0,0 +1,168 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+    standalone = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_STANDALONE
+    args_instance[SER_PORT] = PORT_STANDALONE
+    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_standalone = args_instance.copy()
+    standalone.allocate(args_standalone)
+    instance_standalone = standalone.exists()
+    if instance_standalone:
+        standalone.delete()
+    standalone.create()
+    standalone.open()
+
+    # Delete each instance in the end
+    def fin():
+        standalone.delete()
+
+    request.addfinalizer(fin)
+
+    # Clear out the tmp dir
+    standalone.clearTmpDir(__file__)
+
+    return TopologyStandalone(standalone)
+
+
+def test_ticket48312(topology):
+    """
+    Configure the managed entries plugin (template/definition), then perform
+    a modrdn (deleteoldrdn 1), and make sure the server does not crash.
+    """
+
+    GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX
+    PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX
+    USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX
+    CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config'
+    TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX
+    USER_NEWRDN = 'uid=\+user1'
+
+    #
+    # First enable dynamic plugins
+    #
+    try:
+        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to enable dynamic plugins! ' + e.message['desc'])
+        assert False
+    topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
+
+    #
+    # Add our org units (they should already exist, but do it just in case)
+    #
+    try:
+        topology.standalone.add_s(Entry((PEOPLE_OU, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'people'})))
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc'])
+        assert False
+
+    try:
+        topology.standalone.add_s(Entry((GROUP_OU, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'groups'})))
+    except ldap.ALREADY_EXISTS:
+        pass
+    except ldap.LDAPError as e:
+        log.fatal('test_mep: Failed to add groups org unit: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Add the template entry
+    #
+    try:
+        topology.standalone.add_s(Entry((TEMPLATE_DN, {
+            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
+            'cn': 'MEP Template',
+            'mepRDNAttr': 'cn',
+            'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
+            'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
+        })))
+    except ldap.LDAPError as e:
+        log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Add the definition entry
+    #
+    try:
+        topology.standalone.add_s(Entry((CONFIG_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'config',
+            'originScope': PEOPLE_OU,
+            'originFilter': 'objectclass=posixAccount',
+            'managedBase': GROUP_OU,
+            'managedTemplate': TEMPLATE_DN
+        })))
+    except ldap.LDAPError as e:
+        log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Add an entry that meets the MEP scope
+    #
+    try:
+        topology.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top posixAccount extensibleObject'.split(),
+            'uid': 'user1',
+            'cn': 'user1',
+            'uidNumber': '1',
+            'gidNumber': '1',
+            'homeDirectory': '/home/user1',
+            'description': 'user description'
+        })))
+    except ldap.LDAPError as e:
+        log.fatal('test_mep: Failed to add user1: error ' + e.message['desc'])
+        assert False
+
+    #
+    # Perform a modrdn on USER_DN
+    #
+    try:
+
topology.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn: error ' + e.message['desc']) + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/tickets/ticket48325_test.py b/dirsrvtests/tests/tickets/ticket48325_test.py new file mode 100644 index 0000000..3505d1a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48325_test.py @@ -0,0 +1,270 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyReplication(object): + def __init__(self, master1, hub1, consumer1): + master1.open() + self.master1 = master1 + hub1.open() + self.hub1 = hub1 + consumer1.open() + self.consumer1 = consumer1 + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, + replicaId=REPLICAID_MASTER_1) + + # Creating hub 1... + hub1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_HUB_1 + args_instance[SER_PORT] = PORT_HUB_1 + args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_hub = args_instance.copy() + hub1.allocate(args_hub) + instance_hub1 = hub1.exists() + if instance_hub1: + hub1.delete() + hub1.create() + hub1.open() + hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB, + replicaId=REPLICAID_HUB_1) + + # Creating consumer 1... 
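+    # The consumer gets its own changelog and is enabled as
+    # REPLICAROLE_CONSUMER; test_ticket48325 later promotes it to a master
+    # by rewriting nsDS5ReplicaType, nsDS5ReplicaID and nsDS5Flags.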
+ consumer1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer1.allocate(args_consumer) + instance_consumer1 = consumer1.exists() + if instance_consumer1: + consumer1.delete() + consumer1.create() + consumer1.open() + consumer1.changelog.create() + consumer1.replica.enableReplication(suffix=SUFFIX, + role=REPLICAROLE_CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # + # Create all the agreements + # + # Creating agreement from master 1 to hub 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m1_h1_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m1_h1_agmt) + + # Creating agreement from hub 1 to consumer 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host, + port=consumer1.port, + properties=properties) + if not h1_c1_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1) + master1.waitForReplInit(m1_h1_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + hub1.waitForReplInit(h1_c1_agmt) + + # Check replication is working... + if master1.testReplication(DEFAULT_SUFFIX, consumer1): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Delete each instance in the end + def fin(): + master1.delete() + hub1.delete() + consumer1.delete() + pass + + request.addfinalizer(fin) + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + return TopologyReplication(master1, hub1, consumer1) + + +def checkFirstElement(ds, rid): + """ + Return True if the first RUV element is for the specified rid + """ + try: + entry = ds.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + REPLICA_RUV_FILTER, + ['nsds50ruv']) + assert entry + entry = entry[0] + except ldap.LDAPError as e: + log.fatal('Failed to retrieve RUV entry: %s' % str(e)) + assert False + + ruv_elements = entry.getValues('nsds50ruv') + if ('replica %s ' % rid) in ruv_elements[1]: + return True + else: + return False + + +def test_ticket48325(topology): + """ + Test that the RUV element order is correctly maintained when promoting + a hub or consumer. 
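+    The consumer and then the hub are promoted by updating nsDS5ReplicaType
+    and nsDS5ReplicaID (plus nsDS5Flags for the consumer) on their replica
+    entries; after each promotion the first RUV element must carry the new
+    replica ID.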
+ """ + + # + # Promote consumer to master + # + try: + DN = topology.consumer1.replica._get_mt_entry(DEFAULT_SUFFIX) + topology.consumer1.modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaType', + '3'), + (ldap.MOD_REPLACE, + 'nsDS5ReplicaID', + '1234'), + (ldap.MOD_REPLACE, + 'nsDS5Flags', + '1')]) + except ldap.LDAPError as e: + log.fatal('Failed to promote consuemr to master: error %s' % str(e)) + assert False + time.sleep(1) + + # + # Check ruv has been reordered + # + if not checkFirstElement(topology.consumer1, '1234'): + log.fatal('RUV was not reordered') + assert False + + # + # Create repl agreement from the newly promoted master to master1 + # + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + new_agmt = topology.consumer1.agreement.create(suffix=SUFFIX, + host=topology.master1.host, + port=topology.master1.port, + properties=properties) + + if not new_agmt: + log.fatal("Fail to create new agmt from old consumer to the master") + assert False + + # + # Test replication is working + # + if topology.consumer1.testReplication(DEFAULT_SUFFIX, topology.master1): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # + # Promote hub to master + # + try: + DN = topology.hub1.replica._get_mt_entry(DEFAULT_SUFFIX) + topology.hub1.modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaType', + '3'), + (ldap.MOD_REPLACE, + 'nsDS5ReplicaID', + '5678')]) + except ldap.LDAPError as e: + log.fatal('Failed to promote consuemr to master: error %s' % str(e)) + assert False + time.sleep(1) + + # + # Check ruv has been reordered + # + if not checkFirstElement(topology.hub1, '5678'): + log.fatal('RUV was not reordered') + assert False + + # + # Test replication is working + # + if topology.hub1.testReplication(DEFAULT_SUFFIX, topology.master1): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Done + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/tickets/ticket48362_test.py b/dirsrvtests/tests/tickets/ticket48362_test.py new file mode 100644 index 0000000..1b5651f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48362_test.py @@ -0,0 +1,278 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +PEOPLE_OU='people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +MAX_ACCOUNTS=5 + +BINDMETHOD_ATTR = 'dnaRemoteBindMethod' +BINDMETHOD_VALUE = "SASL/GSSAPI" +PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' +PROTOCOLE_VALUE = 'LDAP' + +class TopologyReplication(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + + +#@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = 
installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + + # Creating master 2... + master2 = DirSrv(verbose=False) + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... 
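+    # Sanity check before any DNA configuration is added: testReplication()
+    # should confirm that an update made on master1 arrives on master2.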
+    if master1.testReplication(DEFAULT_SUFFIX, master2):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    # Delete each instance in the end
+    def fin():
+        master1.delete()
+        master2.delete()
+    #request.addfinalizer(fin)
+
+    # Clear out the tmp dir
+    master1.clearTmpDir(__file__)
+
+    return TopologyReplication(master1, master2)
+
+
+def _dna_config(server, nextValue=500, maxValue=510):
+    log.info("Add dna plugin config entry...%s" % server)
+
+    cfg_base_dn = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config'
+
+    try:
+        server.add_s(Entry((cfg_base_dn, {
+            'objectclass': 'top dnaPluginConfig'.split(),
+            'dnaType': 'description',
+            'dnaMagicRegen': '-1',
+            'dnaFilter': '(objectclass=posixAccount)',
+            'dnaScope': 'ou=people,%s' % SUFFIX,
+            'dnaNextValue': str(nextValue),
+            'dnaMaxValue': str(nextValue + maxValue),
+            'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
+        })))
+    except ldap.LDAPError as e:
+        log.error('Failed to add DNA config entry: error ' + e.message['desc'])
+        assert False
+
+    log.info("Enable the DNA plugin...")
+    try:
+        server.plugins.enable(name=PLUGIN_DNA)
+    except ldap.LDAPError as e:
+        log.error("Failed to enable DNA Plugin: error " + e.message['desc'])
+        assert False
+
+    log.info("Restarting the server...")
+    server.stop(timeout=120)
+    time.sleep(1)
+    server.start(timeout=120)
+    time.sleep(3)
+
+
+SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX
+
+
+def _wait_shared_cfg_servers(server, expected):
+    # Poll the DNA shared config area until the expected number of server
+    # entries shows up (each DNA-enabled master registers itself there).
+    attempts = 0
+    ents = []
+    try:
+        ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+    except ldap.NO_SUCH_OBJECT:
+        pass
+    except lib389.NoSuchEntryError:
+        pass
+    while (len(ents) != expected):
+        assert attempts < 10
+        attempts += 1
+        time.sleep(5)
+        try:
+            ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+        except ldap.NO_SUCH_OBJECT:
+            pass
+        except lib389.NoSuchEntryError:
+            pass
+
+
+def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCOLE_VALUE):
+    log.info('\n======================== Update dnaPortNum=%d ============================\n' % server.port)
+    try:
+        ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port)
+        mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, method),
+               (ldap.MOD_REPLACE, PROTOCOLE_ATTR, transport)]
+        server.modify_s(ent.dn, mod)
+
+        log.info('\n======================== Update done\n')
+        ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port)
+    except ldap.NO_SUCH_OBJECT:
+        log.fatal("Unknown host")
+        assert False
+
+
+def test_ticket48362(topology):
+    """Check that dnaRemoteBindMethod and dnaRemoteConnProtocol set on the
+    server entries under the DNA shared config area (ou=ranges) survive a
+    restart, even when the plugin has to recreate its own shared config entry.
+    """
+
+    try:
+        topology.master1.add_s(Entry((PEOPLE_DN, {
+            'objectclass': "top extensibleObject".split(),
+            'ou': 'people'})))
+    except ldap.ALREADY_EXISTS:
+        pass
+
+    topology.master1.add_s(Entry((SHARE_CFG_BASE, {
+        'objectclass': 'top organizationalunit'.split(),
+        'ou': 'ranges'
+    })))
+    # master 1 will have a valid remaining range (i.e. 101)
+    # master 2 will not have a valid remaining range (i.e. 0), so the dna servers list on master2
+    # will not contain master 2. 
So at restart, master 2 is recreated without the method/protocol attribute + _dna_config(topology.master1, nextValue=1000, maxValue=100) + _dna_config(topology.master2, nextValue=2000, maxValue=-1) + + # check we have all the servers available + _wait_shared_cfg_servers(topology.master1, 2) + _wait_shared_cfg_servers(topology.master2, 2) + + # now force the method/transport on the servers entry + _shared_cfg_server_update(topology.master1) + _shared_cfg_server_update(topology.master2) + + + + log.info('\n======================== BEFORE RESTART ============================\n') + ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master1.port) + log.info('\n======================== BEFORE RESTART ============================\n') + assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + + + ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master2.port) + log.info('\n======================== BEFORE RESTART ============================\n') + assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + topology.master1.restart(10) + topology.master2.restart(10) + + # to allow DNA plugin to recreate the local host entry + time.sleep(40) + + log.info('\n=================== AFTER RESTART =================================\n') + ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master1.port) + log.info('\n=================== AFTER RESTART =================================\n') + assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + + ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master2.port) + log.info('\n=================== AFTER RESTART =================================\n') + assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + global installation1_prefix + installation1_prefix='/home/tbordaz/install_1.3.4' + topo = topology(True) + test_ticket48362(topo) +# CURRENT_FILE = os.path.realpath(__file__) +# pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/tickets/ticket48369_test.py b/dirsrvtests/tests/tickets/ticket48369_test.py new file mode 100644 index 0000000..0b65fa2 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48369_test.py @@ -0,0 +1,124 @@ +import os +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from ldap.controls.ppolicy import PasswordPolicyControl + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if 
installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48369(topology): + """ + Test RFE 48369 - return password policy controls by default without needing + to be requested. + """ + + DN = 'uid=test,' + DEFAULT_SUFFIX + + # + # Setup password policy + # + try: + topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, + 'passwordExp', + 'on'), + (ldap.MOD_REPLACE, + 'passwordMaxAge', + '864000'), + (ldap.MOD_REPLACE, + 'passwordSendExpiringTime', + 'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to set config: %s' % str(e)) + assert False + + # + # Add entry + # + try: + topology.standalone.add_s(Entry((DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'test', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user entry: %s' % str(e)) + assert False + time.sleep(1) + + # + # Bind as the new user, and request the control + # + try: + msgid = topology.standalone.simple_bind(DN, "password", + serverctrls=[PasswordPolicyControl()]) + res_type, res_data, res_msgid, res_ctrls = \ + topology.standalone.result3(msgid) + except ldap.LDAPError as e: + log.fatal('Failed to bind: %s: Error %s' % (ctl_resp, str(e))) + assert False + + if res_ctrls[0].controlType == PasswordPolicyControl.controlType: + ppolicy_ctrl = res_ctrls[0] + else: + log.fatal('Control not found') + assert False + + log.info('Time until expiration (%s)' % + repr(ppolicy_ctrl.timeBeforeExpiration)) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/tickets/ticket48370_test.py b/dirsrvtests/tests/tickets/ticket48370_test.py new file mode 100644 index 0000000..f5b1f47 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48370_test.py @@ -0,0 +1,236 @@ +import os +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
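+    # Build a fresh standalone instance for test_ticket48370, which checks
+    # that the 'mail' presence index stays consistent when values are
+    # deleted and re-added (a search like (mail=*) must not return entries
+    # whose last mail value was removed).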
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket48370(topology): + """ + Deleting attirbute values and readding a value does not properly update + the pres index. The values are not actually deleted from the index + """ + + DN = 'uid=user0099,' + DEFAULT_SUFFIX + + # + # Add an entry + # + topology.standalone.add_s(Entry((DN, { + 'objectclass': ['top', 'person', + 'organizationalPerson', + 'inetorgperson', + 'posixAccount'], + 'givenname': 'test', + 'sn': 'user', + 'loginshell': '/bin/bash', + 'uidNumber': '10099', + 'gidNumber': '10099', + 'gecos': 'Test User', + 'mail': ['user0099@dev.null', + 'alias@dev.null', + 'user0099@redhat.com'], + 'cn': 'Test User', + 'homeDirectory': '/home/user0099', + 'uid': 'admin2', + 'userpassword': 'password'}))) + + # + # Perform modify (delete & add mail attributes) + # + try: + topology.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + 'user0099@dev.null'), + (ldap.MOD_DELETE, + 'mail', + 'alias@dev.null'), + (ldap.MOD_ADD, + 'mail', 'user0099@dev.null')]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value- no entries should be returned + # + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=alias@dev.null') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Search using existing attribute value - the entry should be returned + # + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@dev.null') + if entry is None: + log.fatal('Entry not found, but it should have been') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Delete the last values + # + try: + topology.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + 'user0099@dev.null'), + (ldap.MOD_DELETE, + 'mail', + 'user0099@redhat.com') + ]) + except ldap.LDAPError as e: + log.fatal('Failed to modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value - no entries should be returned + # + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@redhat.com') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Make sure presence index is correctly updated - no entries should be + # returned + # + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=*') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Now add the attributes back, and lets run a different set of 
tests with + # a different number of attributes + # + try: + topology.standalone.modify_s(DN, [(ldap.MOD_ADD, + 'mail', + ['user0099@dev.null', + 'alias@dev.null'])]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Remove and readd some attibutes + # + try: + topology.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + 'alias@dev.null'), + (ldap.MOD_DELETE, + 'mail', + 'user0099@dev.null'), + (ldap.MOD_ADD, + 'mail', 'user0099@dev.null')]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value - no entries should be returned + # + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=alias@dev.null') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Search using existing attribute value - the entry should be returned + # + try: + entry = topology.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@dev.null') + if entry is None: + log.fatal('Entry not found, but it should have been') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tmp/README b/dirsrvtests/tests/tmp/README new file mode 100644 index 0000000..0e8f416 --- /dev/null +++ b/dirsrvtests/tests/tmp/README @@ -0,0 +1,10 @@ +TMP DIRECTORY README + +This directory is used to store files(LDIFs, etc) that are created during the ticket script runtime. The script is also responsible for removing any files it places in this directory. This directory can be retrieved via getDir() from the DirSrv class. + +Example: + + tmp_dir_path = topology.standalone.getDir(__file__, TMP_DIR) + + new_ldif = tmp_dir_path + "export.ldif" + diff --git a/dirsrvtests/tickets/finalizer.py b/dirsrvtests/tickets/finalizer.py deleted file mode 100644 index bfbeadd..0000000 --- a/dirsrvtests/tickets/finalizer.py +++ /dev/null @@ -1,64 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 5, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import socket -import time -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import DN_DM -from lib389.properties import * - -log = logging.getLogger(__name__) - -global installation_prefix -installation_prefix=os.getenv('PREFIX') - -def test_finalizer(): - global installation_prefix - - # for each defined instance, remove it - for args_instance in ALL_INSTANCES: - if installation_prefix: - # overwrite the environment setting - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - instance = DirSrv(verbose=True) - instance.allocate(args_instance) - if instance.exists(): - instance.delete() - - # remove any existing backup for this instance - instance.clearBackupFS() - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - test_finalizer() - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket365_test.py b/dirsrvtests/tickets/ticket365_test.py deleted file mode 100644 index 44aa3e8..0000000 --- a/dirsrvtests/tickets/ticket365_test.py +++ /dev/null @@ -1,169 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket365(topology): - ''' - Write your testcase here... - - nsslapd-auditlog-logging-hide-unhashed-pw - - and test - - nsslapd-unhashed-pw-switch ticket 561 - - on, off, nolog? 
- ''' - - USER_DN = 'uid=test_entry,' + DEFAULT_SUFFIX - - # - # Add the test entry - # - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'test_entry', - 'userpassword': 'password' - }))) - except ldap.LDAPError as e: - log.error('Failed to add test user: error ' + e.message['desc']) - assert False - - # - # Enable the audit log - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) - except ldap.LDAPError as e: - log.fatal('Failed to enable audit log, error: ' + e.message['desc']) - assert False - ''' - try: - ent = topology.standalone.getEntry(DN_CONFIG, attrlist=[ - 'nsslapd-instancedir', - 'nsslapd-errorlog', - 'nsslapd-accesslog', - 'nsslapd-certdir', - 'nsslapd-schemadir']) - ''' - # - # Allow the unhashed password to be written to audit log - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, - 'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')]) - except ldap.LDAPError as e: - log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc']) - assert False - - # - # Set new password, and check the audit log - # - try: - topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', 'mypassword')]) - except ldap.LDAPError as e: - log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc']) - assert False - - # Check audit log - if not topology.standalone.searchAuditLog('unhashed#user#password: mypassword'): - log.fatal('failed to find unhashed password in auditlog') - assert False - - # - # Hide unhashed password in audit log - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-hide-unhashed-pw', 'on')]) - except ldap.LDAPError as e: - log.fatal('Failed to deny writing unhashed password to audit log, error: ' + e.message['desc']) - assert False - log.info('Test complete') - - # - # Modify password, and check the audit log - # - try: - topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', 'hidepassword')]) - except ldap.LDAPError as e: - log.fatal('Failed to enable writing unhashed password to audit log, error: ' + e.message['desc']) - assert False - - # Check audit log - if topology.standalone.searchAuditLog('unhashed#user#password: hidepassword'): - log.fatal('Found unhashed password in auditlog') - assert False - - -def test_ticket365_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket365(topo) - test_ticket365_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47313_test.py b/dirsrvtests/tickets/ticket47313_test.py deleted file mode 100644 index 35f2456..0000000 --- a/dirsrvtests/tickets/ticket47313_test.py +++ /dev/null @@ -1,174 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import time -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -ENTRY_NAME = 'test_entry' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47313_run(topology): - """ - It adds 2 test entries - Search with filters including subtype and ! - It deletes the added entries - """ - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - # enable filter error logging - #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] - #topology.standalone.modify_s(DN_CONFIG, mod) - - topology.standalone.log.info("\n\n######################### ADD ######################\n") - - # Prepare the entry with cn;fr & cn;en - entry_name_fr = '%s fr' % (ENTRY_NAME) - entry_name_en = '%s en' % (ENTRY_NAME) - entry_name_both = '%s both' % (ENTRY_NAME) - entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) - entry_both = Entry(entry_dn_both) - entry_both.setValues('objectclass', 'top', 'person') - entry_both.setValues('sn', entry_name_both) - entry_both.setValues('cn', entry_name_both) - entry_both.setValues('cn;fr', entry_name_fr) - entry_both.setValues('cn;en', entry_name_en) - - # Prepare the entry with one member - entry_name_en_only = '%s en only' % (ENTRY_NAME) - entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) - entry_en_only = Entry(entry_dn_en_only) - entry_en_only.setValues('objectclass', 'top', 'person') - entry_en_only.setValues('sn', entry_name_en_only) - entry_en_only.setValues('cn', entry_name_en_only) - entry_en_only.setValues('cn;en', entry_name_en) - - topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) - topology.standalone.add_s(entry_both) - - topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) - topology.standalone.add_s(entry_en_only) - - topology.standalone.log.info("\n\n######################### SEARCH ######################\n") - - # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) - myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) - topology.standalone.log.info("Try to search with filter %s" % myfilter) - ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) - assert 
len(ents) == 1 - assert ents[0].sn == entry_name_en_only - topology.standalone.log.info("Found %s" % ents[0].dn) - - # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) - myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) - topology.standalone.log.info("Try to search with filter %s" % myfilter) - ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) - assert len(ents) == 1 - assert ents[0].sn == entry_name_en_only - topology.standalone.log.info("Found %s" % ents[0].dn) - - # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) - myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) - topology.standalone.log.info("Try to search with filter %s" % myfilter) - ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) - assert len(ents) == 0 - topology.standalone.log.info("Found none") - - topology.standalone.log.info("\n\n######################### DELETE ######################\n") - - topology.standalone.log.info("Try to delete %s " % entry_dn_both) - topology.standalone.delete_s(entry_dn_both) - - topology.standalone.log.info("Try to delete %s " % entry_dn_en_only) - topology.standalone.delete_s(entry_dn_en_only) - - -def test_ticket47313_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47313_run(topo) - - test_ticket47313_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47384_test.py b/dirsrvtests/tickets/ticket47384_test.py deleted file mode 100644 index e5dc354..0000000 --- a/dirsrvtests/tickets/ticket47384_test.py +++ /dev/null @@ -1,167 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import shutil -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
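The ticket47313 searches above interpolate the entry names straight into the filter string, which is fine for fixed test constants; python-ldap can escape the assertion values when that is not the case. A minimal sketch reusing those names, offered as an optional hardening rather than part of the original test:

from ldap.filter import filter_format

# Same subtype filter as above, with the assertion values escaped by
# python-ldap in case they ever contain filter metacharacters ('*', '(' ...).
myfilter = filter_format('(&(sn=%s)(!(cn;fr=%s)))',
                         [entry_name_en_only, entry_name_fr])
ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)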
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47384(topology): - ''' - Test pluginpath validation: relative and absolute paths - - With the inclusion of ticket 47601 - we do allow plugin paths - outside the default location - ''' - PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_WHOAMI - tmp_dir = topology.standalone.getDir(__file__, TMP_DIR) - plugin_dir = get_plugin_dir(topology.standalone.prefix) - - # Copy the library to our tmp directory - try: - shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir) - except IOError as e: - log.fatal('Failed to copy libwhoami-plugin.so to the tmp directory, error: ' - + e.strerror) - assert False - try: - shutil.copy('%s/libwhoami-plugin.la' % plugin_dir, tmp_dir) - except IOError as e: - log.fatal('Failed to copy libwhoami-plugin.la to the tmp directory, error: ' - + e.strerror) - assert False - - # - # Test adding valid plugin paths - # - # Try using the absolute path to the current library - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, - 'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)]) - except ldap.LDAPError as e: - log.error('Failed to set valid plugin path (%s): error (%s)' % - ('%s/libwhoami-plugin' % plugin_dir, e.message['desc'])) - assert False - - # Try using new remote location - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, - 'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)]) - except ldap.LDAPError as e: - log.error('Failed to set valid plugin path (%s): error (%s)' % - ('%s/libwhoami-plugin' % tmp_dir, e.message['desc'])) - assert False - - # Set plugin path back to the default - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, - 'nsslapd-pluginPath', 'libwhoami-plugin')]) - except ldap.LDAPError as e: - log.error('Failed to set valid relative plugin path (%s): error (%s)' % - ('libwhoami-plugin' % tmp_dir, e.message['desc'])) - assert False - - # - # Test invalid path (no library present) - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, - 'nsslapd-pluginPath', '/bin/libwhoami-plugin')]) - # No exception?! This is an error - log.error('Invalid plugin path was incorrectly accepted by the server!') - assert False - except ldap.UNWILLING_TO_PERFORM: - # Correct, operation should be rejected - pass - except ldap.LDAPError as e: - log.error('Failed to set invalid plugin path (%s): error (%s)' % - ('/bin/libwhoami-plugin', e.message['desc'])) - - # - # Test invalid relative path (no library present) - # - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, - 'nsslapd-pluginPath', '../libwhoami-plugin')]) - # No exception?! 
This is an error - log.error('Invalid plugin path was incorrectly accepted by the server!') - assert False - except ldap.UNWILLING_TO_PERFORM: - # Correct, operation should be rejected - pass - except ldap.LDAPError as e: - log.error('Failed to set invalid plugin path (%s): error (%s)' % - ('../libwhoami-plugin', e.message['desc'])) - - log.info('Test complete') - - -def test_ticket47384_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47384(topo) - test_ticket47384_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47431_test.py b/dirsrvtests/tickets/ticket47431_test.py deleted file mode 100644 index a102248..0000000 --- a/dirsrvtests/tickets/ticket47431_test.py +++ /dev/null @@ -1,259 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -DN_7BITPLUGIN="cn=7-bit check,%s" % DN_PLUGIN -ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None] - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47431_0(topology): - ''' - Enable 7 bit plugin - ''' - log.info("Ticket 47431 - 0: Enable 7bit plugin...") - topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) - - -def test_ticket47431_1(topology): - ''' - nsslapd-pluginarg0: uid - nsslapd-pluginarg1: mail - nsslapd-pluginarg2: userpassword <== repeat 27 times - nsslapd-pluginarg3: , - nsslapd-pluginarg4: dc=example,dc=com - - The duplicated values are removed by str2entry_dupcheck as follows: - [..] - str2entry_dupcheck: 27 duplicate values for attribute type nsslapd-pluginarg2 - detected in entry cn=7-bit check,cn=plugins,cn=config. Extra values ignored. - ''' - - log.info("Ticket 47431 - 1: Check 26 duplicate values are treated as one...") - expected = "str2entry_dupcheck: .* duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config." 
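The check further down shells out to egrep to find this message; the same scan can be done in-process with the re module. A minimal sketch, reusing the expected pattern above and the topology.standalone.errlog path used below:

import re

def errorlog_contains(errlog_path, pattern):
    # Return the first errors-log line matching the case-insensitive pattern, or None.
    regex = re.compile(pattern, re.IGNORECASE)
    with open(errlog_path) as logfile:
        for line in logfile:
            if regex.search(line):
                return line
    return None

# e.g., after the restart performed below:
#     assert errorlog_contains(topology.standalone.errlog, expected) is not None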
- - log.debug('modify_s %s' % DN_7BITPLUGIN) - try: - topology.standalone.modify_s(DN_7BITPLUGIN, - [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "mail"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', "userpassword"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ","), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', SUFFIX)]) - except ValueError: - log.error('modify failed: Some problem occured with a value that was provided') - assert False - - arg2 = "nsslapd-pluginarg2: userpassword" - topology.standalone.stop(timeout=10) - dse_ldif = topology.standalone.confdir + '/dse.ldif' - os.system('mv %s %s.47431' % (dse_ldif, dse_ldif)) - os.system('sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % (arg2, dse_ldif, dse_ldif)) - topology.standalone.start(timeout=10) - - cmdline = 'egrep -i "%s" %s' % (expected, topology.standalone.errlog) - p = os.popen(cmdline, "r") - line = p.readline() - if line == "": - log.error('Expected error "%s" not logged in %s' % (expected, topology.standalone.errlog)) - assert False - else: - log.debug('line: %s' % line) - log.info('Expected error "%s" logged in %s' % (expected, topology.standalone.errlog)) - - - log.info("Ticket 47431 - 1: done") - - -def test_ticket47431_2(topology): - ''' - nsslapd-pluginarg0: uid - nsslapd-pluginarg0: mail - nsslapd-pluginarg1: userpassword - nsslapd-pluginarg2: , - nsslapd-pluginarg3: dc=example,dc=com - ==> - nsslapd-pluginarg0: uid - nsslapd-pluginarg1: mail - nsslapd-pluginarg2: userpassword - nsslapd-pluginarg3: , - nsslapd-pluginarg4: dc=example,dc=com - Should be logged in error log: - [..] NS7bitAttr_Init - 0: uid - [..] NS7bitAttr_Init - 1: userpassword - [..] NS7bitAttr_Init - 2: mail - [..] NS7bitAttr_Init - 3: , - [..] NS7bitAttr_Init - 4: dc=example,dc=com - ''' - - log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...") - - try: - topology.standalone.modify_s(DN_7BITPLUGIN, - [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"), - (ldap.MOD_ADD, 'nsslapd-pluginarg0', "mail"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "userpassword"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', ","), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', SUFFIX), - (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)]) - except ValueError: - log.error('modify failed: Some problem occured with a value that was provided') - assert False - - # PLUGIN LOG LEVEL - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) - - topology.standalone.restart(timeout=10) - - cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog) - p = os.popen(cmdline, "r") - i = 0 - while ATTRS[i]: - line = p.readline() - log.debug('line - %s' % line) - log.debug('ATTRS[%d] %s' % (i, ATTRS[i])) - if line == "": - break - elif line.find(ATTRS[i]) >= 0: - log.debug('%s was logged' % ATTRS[i]) - else: - log.error('%s was not logged.' % ATTRS[i]) - assert False - i = i + 1 - - log.info("Ticket 47431 - 2: done") - - -def test_ticket47431_3(topology): - ''' - nsslapd-pluginarg1: uid - nsslapd-pluginarg3: mail - nsslapd-pluginarg5: userpassword - nsslapd-pluginarg7: , - nsslapd-pluginarg9: dc=example,dc=com - ==> - nsslapd-pluginarg0: uid - nsslapd-pluginarg1: mail - nsslapd-pluginarg2: userpassword - nsslapd-pluginarg3: , - nsslapd-pluginarg4: dc=example,dc=com - Should be logged in error log: - [..] 
NS7bitAttr_Init - 0: uid - [..] NS7bitAttr_Init - 1: userpassword - [..] NS7bitAttr_Init - 2: mail - [..] NS7bitAttr_Init - 3: , - [..] NS7bitAttr_Init - 4: dc=example,dc=com - ''' - - log.info("Ticket 47431 - 3: Check missing args are fixed...") - - try: - topology.standalone.modify_s(DN_7BITPLUGIN, - [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "uid"), - (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', "mail"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', "userpassword"), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', ","), - (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', SUFFIX)]) - except ValueError: - log.error('modify failed: Some problem occured with a value that was provided') - assert False - - # PLUGIN LOG LEVEL - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) - - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47431' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=10) - - cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog) - p = os.popen(cmdline, "r") - i = 0 - while ATTRS[i]: - line = p.readline() - if line == "": - break - elif line.find(ATTRS[i]) >= 0: - log.debug('%s was logged' % ATTRS[i]) - else: - log.error('%s was not logged.' % ATTRS[i]) - assert False - i = i + 1 - - log.info("Ticket 47431 - 3: done") - log.info('Test complete') - - -def test_ticket47431_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47431_0(topo) - test_ticket47431_1(topo) - test_ticket47431_2(topo) - test_ticket47431_3(topo) - test_ticket47431_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47462_test.py b/dirsrvtests/tickets/ticket47462_test.py deleted file mode 100644 index c88cf43..0000000 --- a/dirsrvtests/tickets/ticket47462_test.py +++ /dev/null @@ -1,365 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -DES_PLUGIN = 'cn=DES,cn=Password Storage Schemes,cn=plugins,cn=config' -AES_PLUGIN = 'cn=AES,cn=Password Storage Schemes,cn=plugins,cn=config' -MMR_PLUGIN = 'cn=Multimaster Replication Plugin,cn=plugins,cn=config' -AGMT_DN = '' -USER_DN = 'cn=test_user,' + DEFAULT_SUFFIX -USER1_DN = 'cn=test_user1,' + DEFAULT_SUFFIX -TEST_REPL_DN = 'cn=test repl,' + DEFAULT_SUFFIX - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. 
- ''' - global installation1_prefix - global installation2_prefix - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - AGMT_DN = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - master1.agreement - if not AGMT_DN: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % AGMT_DN) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(AGMT_DN) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master1.clearTmpDir(__file__) - - return TopologyMaster1Master2(master1, master2) - - -def test_ticket47462(topology): - """ - Test that AES properly replaces DES during an update/restart, and that - replication also works correctly. - """ - - # - # First set config as if it's an older version. Set DES to use libdes-plugin, - # MMR to depend on DES, delete the existing AES plugin, and set a DES password - # for the replication agreement. 
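The replication checks later in this test poll the second master in a loop until the new entry shows up; that pattern could be collected into a small helper. A sketch built only from the lib389 and python-ldap calls already used in this file:

def wait_for_entry(server, dn, timeout=10):
    # Poll the given server until the entry replicates, or give up after
    # 'timeout' one-second retries and return None.
    for _ in range(timeout):
        try:
            return server.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
    return None

# e.g. assert wait_for_entry(topology.master2, USER1_DN) is not None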
- # - - # - # Add an extra attribute to the DES plugin args - # - try: - topology.master1.modify_s(DES_PLUGIN, - [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')]) - except ldap.LDAPError as e: - log.fatal('Failed to enable DES plugin, error: ' + e.message['desc']) - assert False - - try: - topology.master1.modify_s(DES_PLUGIN, - [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')]) - except ldap.LDAPError as e: - log.fatal('Failed to reset DES plugin, error: ' + e.message['desc']) - assert False - - try: - topology.master1.modify_s(MMR_PLUGIN, - [(ldap.MOD_DELETE, 'nsslapd-plugin-depends-on-named', 'AES')]) - - except ldap.NO_SUCH_ATTRIBUTE: - pass - except ldap.LDAPError as e: - log.fatal('Failed to reset MMR plugin, error: ' + e.message['desc']) - assert False - - # - # Delete the AES plugin - # - try: - topology.master1.delete_s(AES_PLUGIN) - except ldap.NO_SUCH_OBJECT: - pass - except ldap.LDAPError as e: - log.fatal('Failed to delete AES plugin, error: ' + e.message['desc']) - assert False - - # restart the server so we must use DES plugin - topology.master1.restart(timeout=10) - - # - # Get the agmt dn, and set the password - # - try: - entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE, 'objectclass=nsDS5ReplicationAgreement') - if entry: - agmt_dn = entry[0].dn - log.info('Found agmt dn (%s)' % agmt_dn) - else: - log.fatal('No replication agreements!') - assert False - except ldap.LDAPError as e: - log.fatal('Failed to search for replica credentials: ' + e.message['desc']) - assert False - - try: - properties = {RA_BINDPW: "password"} - topology.master1.agreement.setProperties(None, agmt_dn, None, properties) - log.info('Successfully modified replication agreement') - except ValueError: - log.error('Failed to update replica agreement: ' + AGMT_DN) - assert False - - # - # Check replication works with the new DES password - # - try: - topology.master1.add_s(Entry((USER1_DN, - {'objectclass': "top person".split(), - 'sn': 'sn', - 'cn': 'test_user'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = topology.master2.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if not ent: - log.fatal('Replication test failed fo user1!') - assert False - else: - log.info('Replication test passed') - except ldap.LDAPError as e: - log.fatal('Failed to add test user: ' + e.message['desc']) - assert False - - # - # Run the upgrade... - # - topology.master1.upgrade('online') - topology.master1.restart(timeout=10) - topology.master2.restart(timeout=10) - - # - # Check that the restart converted existing DES credentials - # - try: - entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE, 'nsDS5ReplicaCredentials=*') - if entry: - val = entry[0].getValue('nsDS5ReplicaCredentials') - if val.startswith('{AES-'): - log.info('The DES credentials have been converted to AES') - else: - log.fatal('Failed to convert credentials from DES to AES!') - assert False - else: - log.fatal('Failed to find any entries with nsDS5ReplicaCredentials ') - assert False - except ldap.LDAPError as e: - log.fatal('Failed to search for replica credentials: ' + e.message['desc']) - assert False - - # - # Check that the AES plugin exists, and has all the attributes listed in DES plugin. - # The attributes might not be in the expected order so check all the attributes. 
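Because the copied value may land under any nsslapd-pluginargN slot, the check that follows can also be written as a loop over the slots. A sketch using the Entry.hasValue() call already used below; the bound of 10 slots is an arbitrary illustrative choice:

def has_arg_value(entry, value, max_args=10):
    # True if any of nsslapd-pluginarg0 .. nsslapd-pluginarg<max_args-1>
    # carries the given value.
    return any(entry.hasValue('nsslapd-pluginarg%d' % i, value)
               for i in range(max_args))

# e.g. assert has_arg_value(entry[0], 'description')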
- # - try: - entry = topology.master1.search_s(AES_PLUGIN, ldap.SCOPE_BASE, 'objectclass=*') - if not entry[0].hasValue('nsslapd-pluginarg0', 'description') and \ - not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \ - not entry[0].hasValue('nsslapd-pluginarg2', 'description'): - log.fatal('The AES plugin did not have the DES attribute copied over correctly') - assert False - else: - log.info('The AES plugin was correctly setup') - except ldap.LDAPError as e: - log.fatal('Failed to find AES plugin: ' + e.message['desc']) - assert False - - # - # Check that the MMR plugin was updated - # - try: - entry = topology.master1.search_s(MMR_PLUGIN, ldap.SCOPE_BASE, 'objectclass=*') - if not entry[0].hasValue('nsslapd-plugin-depends-on-named', 'AES'): - log.fatal('The MMR Plugin was not correctly updated') - assert False - else: - log.info('The MMR plugin was correctly updated') - except ldap.LDAPError as e: - log.fatal('Failed to find AES plugin: ' + e.message['desc']) - assert False - - # - # Check that the DES plugin was correctly updated - # - try: - entry = topology.master1.search_s(DES_PLUGIN, ldap.SCOPE_BASE, 'objectclass=*') - if not entry[0].hasValue('nsslapd-pluginPath', 'libpbe-plugin'): - log.fatal('The DES Plugin was not correctly updated') - assert False - else: - log.info('The DES plugin was correctly updated') - except ldap.LDAPError as e: - log.fatal('Failed to find AES plugin: ' + e.message['desc']) - assert False - - # - # Check replication one last time - # - try: - topology.master1.add_s(Entry((USER_DN, - {'objectclass': "top person".split(), - 'sn': 'sn', - 'cn': 'test_user'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = topology.master2.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if not ent: - log.fatal('Replication test failed!') - assert False - else: - log.info('Replication test passed') - except ldap.LDAPError as e: - log.fatal('Failed to add test user: ' + e.message['desc']) - assert False - - -def test_ticket47462_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47462(topo) - test_ticket47462_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47490_test.py b/dirsrvtests/tickets/ticket47490_test.py deleted file mode 100644 index b61d443..0000000 --- a/dirsrvtests/tickets/ticket47490_test.py +++ /dev/null @@ -1,691 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import ldap -import socket -import time -import logging -import pytest -import re -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -ENTRY_DN = "cn=test_entry, %s" % SUFFIX -MUST_OLD = "(postalAddress $ preferredLocale)" -MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)" -MAY_OLD = "(postalCode $ street)" -MAY_NEW = "(postalCode $ street $ postOfficeBox)" - - -class TopologyMasterConsumer(object): - def __init__(self, master, consumer): - master.open() - self.master = master - - consumer.open() - self.consumer = consumer - - -def _header(topology, label): - topology.master.log.info("\n\n###############################################") - topology.master.log.info("#######") - topology.master.log.info("####### %s" % label) - topology.master.log.info("#######") - topology.master.log.info("###################################################") - - -def pattern_errorlog(file, log_pattern): - try: - pattern_errorlog.last_pos += 1 - except AttributeError: - pattern_errorlog.last_pos = 0 - - found = None - log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) - file.seek(pattern_errorlog.last_pos) - - # Use a while true iteration because 'for line in file: hit a - # python bug that break file.tell() - while True: - line = file.readline() - log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) - found = log_pattern.search(line) - if ((line == '') or (found)): - break - - log.debug("_pattern_errorlog: end at offset %d" % file.tell()) - pattern_errorlog.last_pos = file.tell() - return found - - -def _oc_definition(oid_ext, name, must=None, may=None): - oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext - desc = 'To test ticket 47490' - sup = 'person' - if not must: - must = MUST_OLD - if not may: - may = MAY_OLD - - new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) - return new_oc - - -def add_OC(instance, oid_ext, name): - new_oc = _oc_definition(oid_ext, name) - instance.schema.add_schema('objectClasses', new_oc) - - -def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): - old_oc = _oc_definition(oid_ext, name, old_must, old_may) - new_oc = _oc_definition(oid_ext, name, new_must, new_may) - instance.schema.del_schema('objectClasses', old_oc) - instance.schema.add_schema('objectClasses', new_oc) - - -def support_schema_learning(topology): - """ - with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn - schema definitions when a replication occurs. 
- Before that ticket: replication of the schema fails requiring administrative operation - In the test the schemaCSN (master consumer) differs - - After that ticket: replication of the schema succeeds (after an initial phase of learning) - In the test the schema CSN (master consumer) are in sync - - This function returns True if 47721 is fixed in the current release - False else - """ - ent = topology.consumer.getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring']) - if ent.hasAttr('nsslapd-versionstring'): - val = ent.getValue('nsslapd-versionstring') - version = val.split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] - major = int(version[0]) - minor = int(version[1]) - if major > 1: - return True - if minor > 3: - # version is 1.4 or after - return True - if minor == 3: - if version[2].isdigit(): - if int(version[2]) >= 3: - return True - return False - - -def trigger_update(topology): - """ - It triggers an update on the supplier. This will start a replication - session and a schema push - """ - try: - trigger_update.value += 1 - except AttributeError: - trigger_update.value = 1 - replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_update.value))] - topology.master.modify_s(ENTRY_DN, replace) - - # wait 10 seconds that the update is replicated - loop = 0 - while loop <= 10: - try: - ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) - val = ent.telephonenumber or "0" - if int(val) == trigger_update.value: - return - # the expected value is not yet replicated. try again - time.sleep(1) - loop += 1 - log.debug("trigger_update: receive %s (expected %d)" % (val, trigger_update.value)) - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - - -def trigger_schema_push(topology): - ''' - Trigger update to create a replication session. - In case of 47721 is fixed and the replica needs to learn the missing definition, then - the first replication session learn the definition and the second replication session - push the schema (and the schemaCSN. - This is why there is two updates and replica agreement is stopped/start (to create a second session) - ''' - agreements = topology.master.agreement.list(suffix=SUFFIX, consumer_host=topology.consumer.host, consumer_port=topology.consumer.port) - assert(len(agreements) == 1) - ra = agreements[0] - trigger_update(topology) - topology.master.agreement.pause(ra.dn) - topology.master.agreement.resume(ra.dn) - trigger_update(topology) - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER -> Consumer. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - master = DirSrv(verbose=False) - consumer = DirSrv(verbose=False) - - # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master.allocate(args_master) - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER_1 - args_instance[SER_PORT] = PORT_CONSUMER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 - args_consumer = args_instance.copy() - consumer.allocate(args_consumer) - - # Get the status of the instance - instance_master = master.exists() - instance_consumer = consumer.exists() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master.testReplication(DEFAULT_SUFFIX, consumer): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master.clearTmpDir(__file__) - - # - # Here we have two instances master and consumer - # with replication working. 
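For reference while reading the test cases below, this is what the _oc_definition() helper above renders with the default and extended MUST sets; the first string is what add_OC(master, 2, 'masterNewOCA') installs, the second is the variant pushed in test case four:

# ( 1.2.3.4.5.6.7.8.9.10.2 NAME 'masterNewOCA' DESC 'To test ticket 47490'
#   SUP person AUXILIARY MUST (postalAddress $ preferredLocale)
#   MAY (postalCode $ street) )
print(_oc_definition(2, 'masterNewOCA'))

# ( 1.2.3.4.5.6.7.8.9.10.2 NAME 'masterNewOCA' DESC 'To test ticket 47490'
#   SUP person AUXILIARY MUST (postalAddress $ preferredLocale $ telexNumber)
#   MAY (postalCode $ street) )
print(_oc_definition(2, 'masterNewOCA', must=MUST_NEW))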
- return TopologyMasterConsumer(master, consumer) - - -def test_ticket47490_init(topology): - """ - Initialize the test environment - """ - log.debug("test_ticket47490_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # the test case will check if a warning message is logged in the - # error log of the supplier - topology.master.errorlog_file = open(topology.master.errlog, "r") - - # This entry will be used to trigger attempt of schema push - topology.master.add_s(Entry((ENTRY_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_entry', - 'cn': 'test_entry'}))) - - -def test_ticket47490_one(topology): - """ - Summary: Extra OC Schema is pushed - no error - - If supplier schema is a superset (one extra OC) of consumer schema, then - schema is pushed and there is no message in the error log - State at startup: - - supplier default schema - - consumer default schema - Final state - - supplier +masterNewOCA - - consumer +masterNewOCA - - """ - _header(topology, "Extra OC Schema is pushed - no error") - - log.debug("test_ticket47490_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # update the schema of the supplier so that it is a superset of - # consumer. Schema should be pushed - add_OC(topology.master, 2, 'masterNewOCA') - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was updated on the consumer - log.debug("test_ticket47490_one master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47490_one onsumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - if res is not None: - assert False - - -def test_ticket47490_two(topology): - """ - Summary: Extra OC Schema is pushed - (ticket 47721 allows to learn missing def) - - If consumer schema is a superset (one extra OC) of supplier schema, then - schema is pushed and there is a message in the error log - State at startup - - supplier +masterNewOCA - - consumer +masterNewOCA - Final state - - supplier +masterNewOCA +masterNewOCB - - consumer +masterNewOCA +consumerNewOCA - - """ - - _header(topology, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)") - - # add this OC on consumer. 
Supplier will no push the schema - add_OC(topology.consumer, 1, 'consumerNewOCA') - - # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) - time.sleep(2) - add_OC(topology.master, 3, 'masterNewOCB') - - # now push the scheam - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was NOT updated on the consumer - # with 47721, supplier learns the missing definition - log.debug("test_ticket47490_two master_schema_csn=%s", master_schema_csn) - log.debug("test_ticket47490_two consumer_schema_csn=%s", consumer_schema_csn) - if support_schema_learning(topology): - assert master_schema_csn == consumer_schema_csn - else: - assert master_schema_csn != consumer_schema_csn - - # Check the error log of the supplier does not contain an error - # This message may happen during the learning phase - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - - -def test_ticket47490_three(topology): - """ - Summary: Extra OC Schema is pushed - no error - - If supplier schema is again a superset (one extra OC), then - schema is pushed and there is no message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB - - consumer +masterNewOCA +consumerNewOCA - Final state - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA - - """ - _header(topology, "Extra OC Schema is pushed - no error") - - # Do an upate to trigger the schema push attempt - # add this OC on consumer. Supplier will no push the schema - add_OC(topology.master, 1, 'consumerNewOCA') - - # now push the scheam - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was NOT updated on the consumer - log.debug("test_ticket47490_three master_schema_csn=%s", master_schema_csn) - log.debug("test_ticket47490_three consumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - if res is not None: - assert False - - -def test_ticket47490_four(topology): - """ - Summary: Same OC - extra MUST: Schema is pushed - no error - - If supplier schema is again a superset (OC with more MUST), then - schema is pushed and there is no message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA - Final state - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA - +must=telexnumber - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA - +must=telexnumber - - """ - _header(topology, "Same OC - extra MUST: Schema is pushed - no error") - - mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was updated on the consumer - log.debug("test_ticket47490_four master_schema_csn=%s", master_schema_csn) - 
log.debug("ctest_ticket47490_four onsumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - if res is not None: - assert False - - -def test_ticket47490_five(topology): - """ - Summary: Same OC - extra MUST: Schema is pushed - (fix for 47721) - - If consumer schema is a superset (OC with more MUST), then - schema is pushed (fix for 47721) and there is a message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA - +must=telexnumber - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA - +must=telexnumber - Final state - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA - +must=telexnumber +must=telexnumber - - Note: replication log is enabled to get more details - """ - _header(topology, "Same OC - extra MUST: Schema is pushed - (fix for 47721)") - - # get more detail why it fails - topology.master.enableReplLogging() - - # add telenumber to 'consumerNewOCA' on the consumer - mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) - # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) - time.sleep(2) - add_OC(topology.master, 4, 'masterNewOCC') - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was NOT updated on the consumer - # with 47721, supplier learns the missing definition - log.debug("test_ticket47490_five master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47490_five onsumer_schema_csn=%s", consumer_schema_csn) - if support_schema_learning(topology): - assert master_schema_csn == consumer_schema_csn - else: - assert master_schema_csn != consumer_schema_csn - - # Check the error log of the supplier does not contain an error - # This message may happen during the learning phase - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - - -def test_ticket47490_six(topology): - """ - Summary: Same OC - extra MUST: Schema is pushed - no error - - If supplier schema is again a superset (OC with more MUST), then - schema is pushed and there is no message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA - +must=telexnumber +must=telexnumber - Final state - - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - - Note: replication log is enabled to get more details - """ - _header(topology, "Same OC - extra MUST: Schema is pushed - no error") - - # add telenumber to 'consumerNewOCA' on the consumer - mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = 
topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was NOT updated on the consumer - log.debug("test_ticket47490_six master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47490_six onsumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - # This message may happen during the learning phase - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - if res is not None: - assert False - - -def test_ticket47490_seven(topology): - """ - Summary: Same OC - extra MAY: Schema is pushed - no error - - If supplier schema is again a superset (OC with more MAY), then - schema is pushed and there is no message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - Final stat - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox - """ - _header(topology, "Same OC - extra MAY: Schema is pushed - no error") - - mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was updated on the consumer - log.debug("test_ticket47490_seven master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47490_seven consumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - if res is not None: - assert False - - -def test_ticket47490_eight(topology): - """ - Summary: Same OC - extra MAY: Schema is pushed (fix for 47721) - - If consumer schema is a superset (OC with more MAY), then - schema is pushed (fix for 47721) and there is message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox - Final state - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox - """ - _header(topology, "Same OC - extra MAY: Schema is pushed (fix for 47721)") - - mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - - # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) - time.sleep(2) - mod_OC(topology.master, 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW) - - 
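Each of these test cases repeats the same schemaCSN comparison after trigger_schema_push(); it could be factored into one helper. A sketch reusing the lib389 calls already shown in this file, not part of the original tests:

def check_schema_csn(topology, expect_in_sync):
    # After a push attempt the schemaCSNs converge when the supplier schema is,
    # or becomes via the 47721 learning phase, a superset of the consumer schema.
    master_csn = topology.master.schema.get_schema_csn()
    consumer_csn = topology.consumer.schema.get_schema_csn()
    if expect_in_sync:
        assert master_csn == consumer_csn
    else:
        assert master_csn != consumer_csn

# e.g. check_schema_csn(topology, support_schema_learning(topology))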
trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was not updated on the consumer - # with 47721, supplier learns the missing definition - log.debug("test_ticket47490_eight master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47490_eight onsumer_schema_csn=%s", consumer_schema_csn) - if support_schema_learning(topology): - assert master_schema_csn == consumer_schema_csn - else: - assert master_schema_csn != consumer_schema_csn - - # Check the error log of the supplier does not contain an error - # This message may happen during the learning phase - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - - -def test_ticket47490_nine(topology): - """ - Summary: Same OC - extra MAY: Schema is pushed - no error - - If consumer schema is a superset (OC with more MAY), then - schema is not pushed and there is message in the error log - State at startup - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox - - Final state - - - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox - - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC - +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox - """ - _header(topology, "Same OC - extra MAY: Schema is pushed - no error") - - mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was updated on the consumer - log.debug("test_ticket47490_nine master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47490_nine onsumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - if res is not None: - assert False - - -def test_ticket47490_final(topology): - topology.master.delete() - topology.consumer.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47490_init(topo) - test_ticket47490_one(topo) - test_ticket47490_two(topo) - test_ticket47490_three(topo) - test_ticket47490_four(topo) - test_ticket47490_five(topo) - test_ticket47490_six(topo) - test_ticket47490_seven(topo) - test_ticket47490_eight(topo) - test_ticket47490_nine(topo) - - test_ticket47490_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47553_test.py b/dirsrvtests/tickets/ticket47553_test.py deleted file mode 100644 index 84d462d..0000000 --- a/dirsrvtests/tickets/ticket47553_test.py +++ /dev/null @@ -1,166 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -CONTAINER_1_OU = 'test_ou_1' -CONTAINER_2_OU = 'test_ou_2' -CONTAINER_1 = 'ou=%s,dc=example,dc=com' % CONTAINER_1_OU -CONTAINER_2 = 'ou=%s,dc=example,dc=com' % CONTAINER_2_OU -USER_CN = 'test_user' -USER_PWD = 'Secret123' -USER = 'cn=%s,%s' % (USER_CN, CONTAINER_1) - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
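The env_setup fixture below builds its ACI from three fragments; expanded with the USER DN defined above, it reads as follows. The ACI_EXPANDED name exists only for this illustration:

ACI_EXPANDED = ('(targetattr="*")'
                '(version 3.0; acl "All rights for cn=test_user,ou=test_ou_1,dc=example,dc=com"; '
                'allow (all) '
                'userdn="ldap:///cn=test_user,ou=test_ou_1,dc=example,dc=com";)')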
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Delete each instance in the end - def fin(): - standalone.delete() - request.addfinalizer(fin) - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -@pytest.fixture(scope="module") -def env_setup(topology): - """Adds two containers, one user and two ACI rules""" - - try: - log.info("Add a container: %s" % CONTAINER_1) - topology.standalone.add_s(Entry((CONTAINER_1, - {'objectclass': 'top', - 'objectclass': 'organizationalunit', - 'ou': CONTAINER_1_OU, - }))) - - log.info("Add a container: %s" % CONTAINER_2) - topology.standalone.add_s(Entry((CONTAINER_2, - {'objectclass': 'top', - 'objectclass': 'organizationalunit', - 'ou': CONTAINER_2_OU, - }))) - - log.info("Add a user: %s" % USER) - topology.standalone.add_s(Entry((USER, - {'objectclass': 'top person'.split(), - 'cn': USER_CN, - 'sn': USER_CN, - 'userpassword': USER_PWD - }))) - except ldap.LDAPError as e: - log.error('Failed to add object to database: %s' % e.message['desc']) - assert False - - ACI_TARGET = '(targetattr="*")' - ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER - ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER - ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - - try: - log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, - CONTAINER_1)) - topology.standalone.modify_s(CONTAINER_1, mod) - - log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, - CONTAINER_2)) - topology.standalone.modify_s(CONTAINER_2, mod) - except ldap.LDAPError as e: - log.fatal('Failed to add ACI: error (%s)' % (e.message['desc'])) - assert False - - -def test_ticket47553(topology, env_setup): - """Tests, that MODRDN operation is allowed, - if user has ACI right '(all)' under superior entries, - but doesn't have '(modrdn)' - """ - - log.info("Bind as %s" % USER) - try: - topology.standalone.simple_bind_s(USER, USER_PWD) - except ldap.LDAPError as e: - log.error('Bind failed for %s, error %s' % (USER, e.message['desc'])) - assert False - - log.info("User MODRDN operation from %s to %s" % (CONTAINER_1, - CONTAINER_2)) - try: - topology.standalone.rename_s(USER, "cn=%s" % USER_CN, - newsuperior=CONTAINER_2, delold=1) - except ldap.LDAPError as e: - log.error('MODRDN failed for %s, error %s' % (USER, e.message['desc'])) - assert False - - try: - log.info("Check there is no user in %s" % CONTAINER_1) - entries = topology.standalone.search_s(CONTAINER_1, - ldap.SCOPE_ONELEVEL, - 'cn=%s' % USER_CN) - assert not entries - - log.info("Check there is our user in %s" % CONTAINER_2) - entries = topology.standalone.search_s(CONTAINER_2, - ldap.SCOPE_ONELEVEL, - 'cn=%s' % USER_CN) - assert entries - except ldap.LDAPError as e: - log.fatal('Search failed, error: ' + e.message['desc']) - assert False - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - # -v for additional verbose - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tickets/ticket47560_test.py b/dirsrvtests/tickets/ticket47560_test.py deleted 
file mode 100644 index da86217..0000000 --- a/dirsrvtests/tickets/ticket47560_test.py +++ /dev/null @@ -1,253 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47560(topology): - """ - This test case does the following: - SETUP - - Create entry cn=group,SUFFIX - - Create entry cn=member,SUFFIX - - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" - - Enable Memberof Plugins - - # Here the cn=member entry has a 'memberOf' but - # cn=group entry does not contain 'cn=member' in its member - - TEST CASE - - start the fixupmemberof task - - read the cn=member entry - - check 'memberOf is now empty - - TEARDOWN - - Delete entry cn=group,SUFFIX - - Delete entry cn=member,SUFFIX - - Disable Memberof Plugins - """ - - def _enable_disable_mbo(value): - """ - Enable or disable mbo plugin depending on 'value' ('on'/'off') - """ - # enable/disable the mbo plugin - if value == 'on': - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - else: - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - - log.debug("-------------> _enable_disable_mbo(%s)" % value) - - topology.standalone.stop(timeout=120) - time.sleep(1) - topology.standalone.start(timeout=120) - time.sleep(3) - - # need to reopen a connection toward the instance - topology.standalone.open() - - def _test_ticket47560_setup(): - """ - - Create entry cn=group,SUFFIX - - Create entry cn=member,SUFFIX - - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" - - Enable Memberof Plugins - """ - log.debug("-------- > _test_ticket47560_setup\n") - - # - # By default the memberof plugin is disabled create - # - create a group entry - # - create a member entry - # - set the member entry as memberof the group entry - # - entry = Entry(group_DN) - entry.setValues('objectclass', 'top', 'groupOfNames', 'inetUser') - entry.setValues('cn', 'group') - try: - topology.standalone.add_s(entry) - except ldap.ALREADY_EXISTS: - log.debug("Entry %s already 
exists" % (group_DN)) - - entry = Entry(member_DN) - entry.setValues('objectclass', 'top', 'person', 'organizationalPerson', 'inetorgperson', 'inetUser') - entry.setValues('uid', 'member') - entry.setValues('cn', 'member') - entry.setValues('sn', 'member') - try: - topology.standalone.add_s(entry) - except ldap.ALREADY_EXISTS: - log.debug("Entry %s already exists" % (member_DN)) - - replace = [(ldap.MOD_REPLACE, 'memberof', group_DN)] - topology.standalone.modify_s(member_DN, replace) - - # - # enable the memberof plugin and restart the instance - # - _enable_disable_mbo('on') - - # - # check memberof attribute is still present - # - filt = 'uid=member' - ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) - assert len(ents) == 1 - ent = ents[0] - #print ent - value = ent.getValue('memberof') - #print "memberof: %s" % (value) - assert value == group_DN - - def _test_ticket47560_teardown(): - """ - - Delete entry cn=group,SUFFIX - - Delete entry cn=member,SUFFIX - - Disable Memberof Plugins - """ - log.debug("-------- > _test_ticket47560_teardown\n") - # remove the entries group_DN and member_DN - try: - topology.standalone.delete_s(group_DN) - except: - log.warning("Entry %s fail to delete" % (group_DN)) - try: - topology.standalone.delete_s(member_DN) - except: - log.warning("Entry %s fail to delete" % (member_DN)) - # - # disable the memberof plugin and restart the instance - # - _enable_disable_mbo('off') - - group_DN = "cn=group,%s" % (SUFFIX) - member_DN = "uid=member,%s" % (SUFFIX) - - # - # Initialize the test case - # - _test_ticket47560_setup() - - # - # start the test - # - start the fixup task - # - check the entry is fixed (no longer memberof the group) - # - log.debug("-------- > Start ticket tests\n") - - filt = 'uid=member' - ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) - assert len(ents) == 1 - ent = ents[0] - log.debug("Unfixed entry %r\n" % ent) - - # run the fixup task - topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) - - ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) - assert len(ents) == 1 - ent = ents[0] - log.debug("Fixed entry %r\n" % ent) - - if ent.getValue('memberof') == group_DN: - log.warning("Error the fixupMemberOf did not fix %s" % (member_DN)) - result_successful = False - else: - result_successful = True - - # - # cleanup up the test case - # - _test_ticket47560_teardown() - - assert result_successful is True - - -def test_ticket47560_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47560(topo) - test_ticket47560_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47573_test.py b/dirsrvtests/tickets/ticket47573_test.py deleted file mode 100644 index 8edf113..0000000 --- a/dirsrvtests/tickets/ticket47573_test.py +++ /dev/null @@ -1,347 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -import re -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -ENTRY_DN = "cn=test_entry, %s" % SUFFIX - -MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)" -MAY_OLD = "(postalCode $ street)" - -MUST_NEW = "(postalAddress $ preferredLocale)" -MAY_NEW = "(telexNumber $ postalCode $ street)" - - -class TopologyMasterConsumer(object): - def __init__(self, master, consumer): - master.open() - self.master = master - - consumer.open() - self.consumer = consumer - - -def pattern_errorlog(file, log_pattern): - try: - pattern_errorlog.last_pos += 1 - except AttributeError: - pattern_errorlog.last_pos = 0 - - found = None - log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) - file.seek(pattern_errorlog.last_pos) - - # Use a while true iteration because 'for line in file: hit a - # python bug that break file.tell() - while True: - line = file.readline() - log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) - found = log_pattern.search(line) - if ((line == '') or (found)): - break - - log.debug("_pattern_errorlog: end at offset %d" % file.tell()) - pattern_errorlog.last_pos = file.tell() - return found - - -def _oc_definition(oid_ext, name, must=None, may=None): - oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext - desc = 'To test ticket 47573' - sup = 'person' - if not must: - must = MUST_OLD - if not may: - may = MAY_OLD - - new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) - return new_oc - - -def add_OC(instance, oid_ext, name): - new_oc = _oc_definition(oid_ext, name) - instance.schema.add_schema('objectClasses', new_oc) - - -def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): - old_oc = _oc_definition(oid_ext, name, old_must, old_may) - new_oc = _oc_definition(oid_ext, name, new_must, new_may) - instance.schema.del_schema('objectClasses', old_oc) - instance.schema.add_schema('objectClasses', new_oc) - - -def trigger_schema_push(topology): - """ - It triggers an update on the supplier. This will start a replication - session and a schema push - """ - try: - trigger_schema_push.value += 1 - except AttributeError: - trigger_schema_push.value = 1 - replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))] - topology.master.modify_s(ENTRY_DN, replace) - - # wait 10 seconds that the update is replicated - loop = 0 - while loop <= 10: - try: - ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) - val = ent.telephonenumber or "0" - if int(val) == trigger_schema_push.value: - return - # the expected value is not yet replicated. try again - time.sleep(1) - loop += 1 - log.debug("trigger_schema_push: receive %s (expected %d)" % (val, trigger_schema_push.value)) - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER -> Consumer. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - master = DirSrv(verbose=False) - consumer = DirSrv(verbose=False) - - # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master.allocate(args_master) - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER_1 - args_instance[SER_PORT] = PORT_CONSUMER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 - args_consumer = args_instance.copy() - consumer.allocate(args_consumer) - - # Get the status of the instance - instance_master = master.exists() - instance_consumer = consumer.exists() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master.testReplication(DEFAULT_SUFFIX, consumer): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master.clearTmpDir(__file__) - - # Here we have two instances master and consumer - # with replication working. - return TopologyMasterConsumer(master, consumer) - - -def test_ticket47573_init(topology): - """ - Initialize the test environment - """ - log.debug("test_ticket47573_init topology %r (master %r, consumer %r" % - (topology, topology.master, topology.consumer)) - # the test case will check if a warning message is logged in the - # error log of the supplier - topology.master.errorlog_file = open(topology.master.errlog, "r") - - # This entry will be used to trigger attempt of schema push - topology.master.add_s(Entry((ENTRY_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_entry', - 'cn': 'test_entry'}))) - - -def test_ticket47573_one(topology): - """ - Summary: Add a custom OC with MUST and MAY - MUST = postalAddress $ preferredLocale - MAY = telexNumber $ postalCode $ street - - Final state - - supplier +OCwithMayAttr - - consumer +OCwithMayAttr - - """ - log.debug("test_ticket47573_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # update the schema of the supplier so that it is a superset of - # consumer. 
Schema should be pushed - new_oc = _oc_definition(2, 'OCwithMayAttr', - must = MUST_OLD, - may = MAY_OLD) - topology.master.schema.add_schema('objectClasses', new_oc) - - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was updated on the consumer - log.debug("test_ticket47573_one master_schema_csn=%s", master_schema_csn) - log.debug("ctest_ticket47573_one onsumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res is None - - -def test_ticket47573_two(topology): - """ - Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute - - - Final state - - supplier OCwithMayAttr updated - - consumer OCwithMayAttr updated - - """ - - # Update the objectclass so that a MAY attribute is moved to MUST attribute - mod_OC(topology.master, 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - - # now push the scheam - trigger_schema_push(topology) - master_schema_csn = topology.master.schema.get_schema_csn() - consumer_schema_csn = topology.consumer.schema.get_schema_csn() - - # Check the schemaCSN was NOT updated on the consumer - log.debug("test_ticket47573_two master_schema_csn=%s", master_schema_csn) - log.debug("test_ticket47573_two consumer_schema_csn=%s", consumer_schema_csn) - assert master_schema_csn == consumer_schema_csn - - # Check the error log of the supplier does not contain an error - regex = re.compile("must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res is None - - -def test_ticket47573_three(topology): - ''' - Create a entry with OCwithMayAttr OC - ''' - # Check replication is working fine - dn = "cn=ticket47573, %s" % SUFFIX - topology.master.add_s(Entry((dn, - {'objectclass': "top person OCwithMayAttr".split(), - 'sn': 'test_repl', - 'cn': 'test_repl', - 'postalAddress': 'here', - 'preferredLocale': 'en', - 'telexNumber': '12$us$21', - 'postalCode': '54321'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = topology.consumer.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - -def test_ticket47573_final(topology): - topology.master.delete() - topology.consumer.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47573_init(topo) - test_ticket47573_one(topo) - test_ticket47573_two(topo) - test_ticket47573_three(topo) - - test_ticket47573_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47619_test.py b/dirsrvtests/tickets/ticket47619_test.py deleted file mode 100644 index 0b9961e..0000000 --- a/dirsrvtests/tickets/ticket47619_test.py +++ /dev/null @@ -1,220 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -ENTRY_DN = "cn=test_entry, %s" % SUFFIX - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 100 - -ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] - - -class TopologyMasterConsumer(object): - def __init__(self, master, consumer): - master.open() - self.master = master - - consumer.open() - self.consumer = consumer - - def __repr__(self): - return "Master[%s] -> Consumer[%s" % (self.master, self.consumer) - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER -> Consumer. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - master = DirSrv(verbose=False) - consumer = DirSrv(verbose=False) - - # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master.allocate(args_master) - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER_1 - args_instance[SER_PORT] = PORT_CONSUMER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 - args_consumer = args_instance.copy() - consumer.allocate(args_consumer) - - # Get the status of the instance - instance_master = master.exists() - instance_consumer = consumer.exists() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master.testReplication(DEFAULT_SUFFIX, consumer): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master.clearTmpDir(__file__) - - # Here we have two instances master and consumer - # with replication working. 
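The replication tests in this patch repeat the same polling loop to wait for an update to reach the consumer (see test_ticket47573_three above and the ticket47653 MMR test below). A minimal sketch of that pattern as one helper; `wait_for_entry` and the `server` handle are illustrative names, and it assumes an open lib389 connection to the replica being checked.

import time
import ldap


def wait_for_entry(server, dn, timeout=10, filterstr="(objectclass=*)"):
    """Poll up to `timeout` seconds for `dn` to appear on `server`."""
    for _ in range(timeout):
        try:
            return server.getEntry(dn, ldap.SCOPE_BASE, filterstr)
        except ldap.NO_SUCH_OBJECT:
            # Not replicated yet; retry after a short pause.
            time.sleep(1)
    return None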
- return TopologyMasterConsumer(master, consumer) - - -def test_ticket47619_init(topology): - """ - Initialize the test environment - """ - topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF) - #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - topology.master.stop(timeout=10) - topology.master.start(timeout=10) - - topology.master.log.info("test_ticket47619_init topology %r" % (topology)) - # the test case will check if a warning message is logged in the - # error log of the supplier - topology.master.errorlog_file = open(topology.master.errlog, "r") - - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - topology.master.log.info("test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1)) - - # Check the number of entries in the retro changelog - time.sleep(2) - ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") - assert len(ents) == MAX_OTHERS - - -def test_ticket47619_create_index(topology): - args = {INDEX_TYPE: 'eq'} - for attr in ATTRIBUTES: - topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) - - -def test_ticket47619_reindex(topology): - ''' - Reindex all the attributes in ATTRIBUTES - ''' - args = {TASK_WAIT: True} - for attr in ATTRIBUTES: - rc = topology.master.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args) - assert rc == 0 - - -def test_ticket47619_check_indexed_search(topology): - for attr in ATTRIBUTES: - ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr) - assert len(ents) == 0 - - -def test_ticket47619_final(topology): - topology.master.delete() - topology.consumer.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47619_init(topo) - - test_ticket47619_create_index(topo) - - # important restart that trigger the hang - # at restart, finding the new 'changelog' backend, the backend is acquired in Read - # preventing the reindex task to complete - topo.master.restart(timeout=10) - test_ticket47619_reindex(topo) - test_ticket47619_check_indexed_search(topo) - - test_ticket47619_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47640_test.py b/dirsrvtests/tickets/ticket47640_test.py deleted file mode 100644 index cd450ab..0000000 --- a/dirsrvtests/tickets/ticket47640_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
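ticket47619 above exercises indexing of the retro changelog: it adds an equality index for each attribute and then runs a reindex task that must return 0. A condensed sketch of that step; the helper name and `inst` handle are illustrative, and it assumes an open lib389 connection plus the tests' usual star imports.

from lib389._constants import *
from lib389.properties import *


def index_retro_changelog(inst, attrs):
    """Create an 'eq' index under the retro changelog for each attribute and reindex it."""
    for attr in attrs:
        # Add the index definition to the retro changelog backend.
        inst.index.create(suffix=RETROCL_SUFFIX, attr=attr, args={INDEX_TYPE: 'eq'})
        # Run the reindex task and wait for it; 0 means success.
        rc = inst.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr,
                                args={TASK_WAIT: True})
        assert rc == 0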
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47640(topology): - ''' - Linked Attrs Plugins - verify that if the plugin fails to update the link entry - that the entire operation is aborted - ''' - - # Enable Dynamic plugins, and the linked Attrs plugin - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc']) - assert False - - try: - topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) - except ValueError as e: - ldap.fatal('Failed to enable linked attributes plugin!' 
+ e.message['desc']) - assert False - - # Add the plugin config entry - try: - topology.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'Manager Link', - 'linkType': 'seeAlso', - 'managedType': 'seeAlso' - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) - assert False - - # Add an entry who has a link to an entry that does not exist - OP_REJECTED = False - try: - topology.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'manager', - 'seeAlso': 'uid=user,dc=example,dc=com' - }))) - except ldap.UNWILLING_TO_PERFORM: - # Success - log.info('Add operation correctly rejected.') - OP_REJECTED = True - except ldap.LDAPError as e: - log.fatal('Add operation incorrectly rejected: error %s - ' + - 'expected "unwilling to perform"' % e.message['desc']) - assert False - if not OP_REJECTED: - log.fatal('Add operation incorrectly allowed') - assert False - - log.info('Test complete') - - -def test_ticket47640_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47640(topo) - test_ticket47640_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47653MMR_test.py b/dirsrvtests/tickets/ticket47653MMR_test.py deleted file mode 100644 index f951e55..0000000 --- a/dirsrvtests/tickets/ticket47653MMR_test.py +++ /dev/null @@ -1,473 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -OC_NAME = 'OCticket47653' -MUST = "(postalAddress $ postalCode)" -MAY = "(member $ street)" - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 10 - -BIND_NAME = 'bind_entry' -BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) -BIND_PW = 'password' - -ENTRY_NAME = 'test_entry' -ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) -ENTRY_OC = "top person %s" % OC_NAME - - -def _oc_definition(oid_ext, name, must=None, may=None): - oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext - desc = 'To test ticket 47490' - sup = 'person' - if not must: - must = MUST - if not may: - may = MAY - - new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) - return new_oc - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. 
- ''' - global installation1_prefix - global installation2_prefix - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master1.clearTmpDir(__file__) - - # Here we have two instances master and consumer - # with replication working. 
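The agreement setup above recurs in every replicated fixture in this patch; a sketch of it as one reusable step. `create_agreement`, `supplier` and `consumer` are illustrative names, and the sketch assumes two open lib389 connections that already have replication enabled for `suffix`.

from lib389._constants import *
from lib389.properties import *


def create_agreement(supplier, consumer, suffix):
    """Create one supplier -> consumer agreement and wait for the total init."""
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    agmt = supplier.agreement.create(suffix=suffix, host=consumer.host,
                                     port=consumer.port, properties=properties)
    assert agmt, "Failed to create the replication agreement"
    # Start a total update of the consumer and wait until it finishes.
    supplier.agreement.init(suffix, consumer.host, consumer.port)
    supplier.waitForReplInit(agmt)
    return agmt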
- return TopologyMaster1Master2(master1, master2) - - -def test_ticket47653_init(topology): - """ - It adds - - Objectclass with MAY 'member' - - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation - It deletes the anonymous aci - - """ - - topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME) - new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) - topology.master1.schema.add_schema('objectClasses', new_oc) - - # entry used to bind with - topology.master1.log.info("Add %s" % BIND_DN) - topology.master1.add_s(Entry((BIND_DN, { - 'objectclass': "top person".split(), - 'sn': BIND_NAME, - 'cn': BIND_NAME, - 'userpassword': BIND_PW}))) - - # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL - topology.master1.modify_s(DN_CONFIG, mod) - topology.master2.modify_s(DN_CONFIG, mod) - - # get read of anonymous ACI for use 'read-search' aci in SEARCH test - ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" - mod = [(ldap.MOD_DELETE, 'aci', ACI_ANONYMOUS)] - topology.master1.modify_s(SUFFIX, mod) - topology.master2.modify_s(SUFFIX, mod) - - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - -def test_ticket47653_add(topology): - ''' - This test ADD an entry on MASTER1 where 47653 is fixed. Then it checks that entry is replicated - on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 and check the update on MASTER1 - - It checks that, bound as bind_entry, - - we can not ADD an entry without the proper SELFDN aci. 
- - with the proper ACI we can not ADD with 'member' attribute - - with the proper ACI and 'member' it succeeds to ADD - ''' - topology.master1.log.info("\n\n######################### ADD ######################\n") - - # bind as bind_entry - topology.master1.log.info("Bind as %s" % BIND_DN) - topology.master1.simple_bind_s(BIND_DN, BIND_PW) - - # Prepare the entry with multivalued members - entry_with_members = Entry(ENTRY_DN) - entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') - entry_with_members.setValues('sn', ENTRY_NAME) - entry_with_members.setValues('cn', ENTRY_NAME) - entry_with_members.setValues('postalAddress', 'here') - entry_with_members.setValues('postalCode', '1234') - members = [] - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - members.append("cn=%s,%s" % (name, SUFFIX)) - members.append(BIND_DN) - entry_with_members.setValues('member', members) - - # Prepare the entry with only one member value - entry_with_member = Entry(ENTRY_DN) - entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') - entry_with_member.setValues('sn', ENTRY_NAME) - entry_with_member.setValues('cn', ENTRY_NAME) - entry_with_member.setValues('postalAddress', 'here') - entry_with_member.setValues('postalCode', '1234') - member = [] - member.append(BIND_DN) - entry_with_member.setValues('member', member) - - # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.master1.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) - - topology.master1.add_s(entry_with_member) - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # Ok Now add the proper ACI - topology.master1.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" - ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - time.sleep(1) - - # bind as bind_entry - topology.master1.log.info("Bind as %s" % BIND_DN) - topology.master1.simple_bind_s(BIND_DN, BIND_PW) - - # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.master1.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) - topology.master1.add_s(Entry((ENTRY_DN, { - 'objectclass': ENTRY_OC.split(), - 'sn': ENTRY_NAME, - 'cn': ENTRY_NAME, - 'postalAddress': 'here', - 'postalCode': '1234'}))) - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS - # member should contain only one value - try: - topology.master1.log.info("Try to add Add %s (with several member values)" % ENTRY_DN) - topology.master1.add_s(entry_with_members) - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN) - try: - topology.master1.add_s(entry_with_member) - except ldap.LDAPError as e: - 
topology.master1.log.info("Failed to add entry, error: " + e.message['desc']) - assert False - - # - # Now check the entry as been replicated - # - topology.master2.simple_bind_s(DN_DM, PASSWORD) - topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN) - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - assert loop <= 10 - - # Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2) - topology.master1.log.info("Update %s on M2" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'description', 'test_add')] - topology.master2.modify_s(ENTRY_DN, mod) - - topology.master1.simple_bind_s(DN_DM, PASSWORD) - loop = 0 - while loop <= 10: - try: - ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'): - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - - assert ent.getValue('description') == 'test_add' - - -def test_ticket47653_modify(topology): - ''' - This test MOD an entry on MASTER1 where 47653 is fixed. Then it checks that update is replicated - on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 (bound as BIND_DN). - This update may fail whether or not 47653 is fixed on MASTER2 - - It checks that, bound as bind_entry, - - we can not modify an entry without the proper SELFDN aci. - - adding the ACI, we can modify the entry - ''' - # bind as bind_entry - topology.master1.log.info("Bind as %s" % BIND_DN) - topology.master1.simple_bind_s(BIND_DN, BIND_PW) - - topology.master1.log.info("\n\n######################### MODIFY ######################\n") - - # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.master1.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')] - topology.master1.modify_s(ENTRY_DN, mod) - except Exception as e: - topology.master1.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # Ok Now add the proper ACI - topology.master1.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX - ACI_TARGETATTR = "(targetattr = *)" - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" - ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.master1.modify_s(SUFFIX, mod) - time.sleep(1) - - # bind as bind_entry - topology.master1.log.info("M1: Bind as %s" % BIND_DN) - topology.master1.simple_bind_s(BIND_DN, BIND_PW) - - # modify the entry and checks the value - topology.master1.log.info("M1: Try to modify %s. 
It should succeeds" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')] - topology.master1.modify_s(ENTRY_DN, mod) - - topology.master1.log.info("M1: Bind as %s" % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - - topology.master1.log.info("M1: Check the update of %s" % ENTRY_DN) - ents = topology.master1.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') - assert len(ents) == 1 - assert ents[0].postalCode == '1928' - - # Now check the update has been replicated on M2 - topology.master1.log.info("M2: Bind as %s" % DN_DM) - topology.master2.simple_bind_s(DN_DM, PASSWORD) - topology.master1.log.info("M2: Try to retrieve %s" % ENTRY_DN) - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1928'): - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - assert loop <= 10 - assert ent.getValue('postalCode') == '1928' - - # Now update the entry on Master2 bound as BIND_DN (update may fail if 47653 is not fixed on M2) - topology.master1.log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN)) - topology.master2.simple_bind_s(BIND_DN, PASSWORD) - fail = False - try: - mod = [(ldap.MOD_REPLACE, 'postalCode', '1929')] - topology.master2.modify_s(ENTRY_DN, mod) - fail = False - except ldap.INSUFFICIENT_ACCESS: - topology.master1.log.info("M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2") - fail = True - except Exception as e: - topology.master1.log.info("M2: Exception (not expected): %s" % type(e).__name__) - assert 0 - - if not fail: - # Check the update has been replicaed on M1 - topology.master1.log.info("M1: Bind as %s" % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - topology.master1.log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN)) - loop = 0 - while loop <= 10: - try: - ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1929'): - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - assert ent.getValue('postalCode') == '1929' - - -def test_ticket47653_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47653_init(topo) - - test_ticket47653_add(topo) - test_ticket47653_modify(topo) - - test_ticket47653_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47653_test.py b/dirsrvtests/tickets/ticket47653_test.py deleted file mode 100644 index 1901b84..0000000 --- a/dirsrvtests/tickets/ticket47653_test.py +++ /dev/null @@ -1,381 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -OC_NAME = 'OCticket47653' -MUST = "(postalAddress $ postalCode)" -MAY = "(member $ street)" - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 10 - -BIND_NAME = 'bind_entry' -BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) -BIND_PW = 'password' - -ENTRY_NAME = 'test_entry' -ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) -ENTRY_OC = "top person %s" % OC_NAME - - -def _oc_definition(oid_ext, name, must=None, may=None): - oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext - desc = 'To test ticket 47490' - sup = 'person' - if not must: - must = MUST - if not may: - may = MAY - - new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) - return new_oc - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
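For reference, the objectclass value that _oc_definition(2, OC_NAME, must=MUST, may=MAY) produces for the ticket47653 constants is the following; a worked example spelled out from the format string in the tests.

# Value handed to schema.add_schema('objectClasses', ...); the MAY list is what
# lets test entries carry the 'member' attribute used by the SELFDN acis.
oc = ("( 1.2.3.4.5.6.7.8.9.10.2 NAME 'OCticket47653' "
      "DESC 'To test ticket 47490' SUP person AUXILIARY "
      "MUST (postalAddress $ postalCode) MAY (member $ street) )")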
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47653_init(topology): - """ - It adds - - Objectclass with MAY 'member' - - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation - It deletes the anonymous aci - - """ - - topology.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME) - new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) - topology.standalone.schema.add_schema('objectClasses', new_oc) - - # entry used to bind with - topology.standalone.log.info("Add %s" % BIND_DN) - topology.standalone.add_s(Entry((BIND_DN, { - 'objectclass': "top person".split(), - 'sn': BIND_NAME, - 'cn': BIND_NAME, - 'userpassword': BIND_PW}))) - - # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] - topology.standalone.modify_s(DN_CONFIG, mod) - - # get read of anonymous ACI for use 'read-search' aci in SEARCH test - ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" - mod = [(ldap.MOD_DELETE, 'aci', ACI_ANONYMOUS)] - topology.standalone.modify_s(SUFFIX, mod) - - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - -def test_ticket47653_add(topology): - ''' - It checks that, bound as bind_entry, - - we can not ADD an entry without the proper SELFDN aci. - - with the proper ACI we can not ADD with 'member' attribute - - with the proper ACI and 'member' it succeeds to ADD - ''' - topology.standalone.log.info("\n\n######################### ADD ######################\n") - - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # Prepare the entry with multivalued members - entry_with_members = Entry(ENTRY_DN) - entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') - entry_with_members.setValues('sn', ENTRY_NAME) - entry_with_members.setValues('cn', ENTRY_NAME) - entry_with_members.setValues('postalAddress', 'here') - entry_with_members.setValues('postalCode', '1234') - members = [] - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - members.append("cn=%s,%s" % (name, SUFFIX)) - members.append(BIND_DN) - entry_with_members.setValues('member', members) - - # Prepare the entry with one member - entry_with_member = Entry(ENTRY_DN) - entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') - entry_with_member.setValues('sn', ENTRY_NAME) - entry_with_member.setValues('cn', ENTRY_NAME) - entry_with_member.setValues('postalAddress', 'here') - entry_with_member.setValues('postalCode', '1234') - member = [] - member.append(BIND_DN) - entry_with_member.setValues('member', member) - - # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) - - topology.standalone.add_s(entry_with_member) - except Exception as e: - topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # Ok Now add the proper ACI - topology.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % 
DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" - ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.standalone.modify_s(SUFFIX, mod) - - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) - topology.standalone.add_s(Entry((ENTRY_DN, { - 'objectclass': ENTRY_OC.split(), - 'sn': ENTRY_NAME, - 'cn': ENTRY_NAME, - 'postalAddress': 'here', - 'postalCode': '1234'}))) - except Exception as e: - topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS - # member should contain only one value - try: - topology.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN) - topology.standalone.add_s(entry_with_members) - except Exception as e: - topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - topology.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN) - topology.standalone.add_s(entry_with_member) - - -def test_ticket47653_search(topology): - ''' - It checks that, bound as bind_entry, - - we can not search an entry without the proper SELFDN aci. - - adding the ACI, we can search the entry - ''' - topology.standalone.log.info("\n\n######################### SEARCH ######################\n") - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned - topology.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN) - ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') - assert len(ents) == 0 - - # Ok Now add the proper ACI - topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX - ACI_TARGETATTR = "(targetattr = *)" - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" - ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.standalone.modify_s(SUFFIX, mod) - - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # entry to search with the proper aci - topology.standalone.log.info("Try to search %s should be successful" % ENTRY_DN) - ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') - assert len(ents) == 1 - - -def test_ticket47653_modify(topology): - ''' - It checks that, bound as bind_entry, - - we can not modify an entry without the proper SELFDN aci. 
- - adding the ACI, we can modify the entry - ''' - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - topology.standalone.log.info("\n\n######################### MODIFY ######################\n") - - # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')] - topology.standalone.modify_s(ENTRY_DN, mod) - except Exception as e: - topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - - # Ok Now add the proper ACI - topology.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX - ACI_TARGETATTR = "(targetattr = *)" - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" - ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.standalone.modify_s(SUFFIX, mod) - - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # modify the entry and checks the value - topology.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')] - topology.standalone.modify_s(ENTRY_DN, mod) - - ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') - assert len(ents) == 1 - assert ents[0].postalCode == '1928' - - -def test_ticket47653_delete(topology): - ''' - It checks that, bound as bind_entry, - - we can not delete an entry without the proper SELFDN aci. 
- - adding the ACI, we can delete the entry - ''' - topology.standalone.log.info("\n\n######################### DELETE ######################\n") - - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS - try: - topology.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN) - topology.standalone.delete_s(ENTRY_DN) - except Exception as e: - topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) - assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # Ok Now add the proper ACI - topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX - ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME - ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)" - ACI_SUBJECT = " userattr = \"member#selfDN\";)" - ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - topology.standalone.modify_s(SUFFIX, mod) - - # bind as bind_entry - topology.standalone.log.info("Bind as %s" % BIND_DN) - topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - - # entry to search with the proper aci - topology.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN) - topology.standalone.delete_s(ENTRY_DN) - - -def test_ticket47653_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47653_init(topo) - - test_ticket47653_add(topo) - test_ticket47653_search(topo) - test_ticket47653_modify(topo) - test_ticket47653_delete(topo) - - test_ticket47653_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47664_test.py b/dirsrvtests/tickets/ticket47664_test.py deleted file mode 100644 index 460828d..0000000 --- a/dirsrvtests/tickets/ticket47664_test.py +++ /dev/null @@ -1,225 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl -from ldap.controls.simple import GetEffectiveRightsControl - -log = logging.getLogger(__name__) - -installation_prefix = None - -MYSUFFIX = 'o=ticket47664.org' -MYSUFFIXBE = 'ticket47664' - -_MYLDIF = 'ticket47664.ldif' - -SEARCHFILTER = '(objectclass=*)' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47664_run(topology): - """ - Import 20 entries - Search with Simple Paged Results Control (pagesize = 4) + Get Effective Rights Control (attrs list = ['cn']) - If Get Effective Rights attribute (attributeLevelRights for 'cn') is returned 4 attrs / page AND - the page count == 20/4, then the fix is verified. - """ - log.info('Testing Ticket 47664 - paged results control is not working in some cases when we have a subsuffix') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47664.org ######################\n") - - topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) - topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) - - topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") - - # get tmp dir - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - if mytmp is None: - mytmp = "/tmp" - - MYLDIF = '%s%s' % (mytmp, _MYLDIF) - os.system('ls %s' % MYLDIF) - os.system('rm -f %s' % MYLDIF) - if hasattr(topology.standalone, 'prefix'): - prefix = topology.standalone.prefix - else: - prefix = None - dbgen_prog = prefix + '/bin/dbgen.pl' - topology.standalone.log.info('dbgen_prog: %s' % dbgen_prog) - os.system('%s -s %s -o %s -n 14' % (dbgen_prog, MYSUFFIX, MYLDIF)) - cmdline = 'egrep dn: %s | wc -l' % MYLDIF - p = os.popen(cmdline, "r") - dnnumstr = p.readline() - dnnum = int(dnnumstr) - topology.standalone.log.info("We have %d entries.\n", dnnum) - - topology.standalone.log.info("\n\n######################### Import Test data ######################\n") - - args = {TASK_WAIT: True} - importTask = Tasks(topology.standalone) - importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) - - topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") - topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) - topology.standalone.log.info("Returned %d entries.\n", len(entries)) - - #print entries - - assert dnnum == len(entries) - - topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) - - topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL ######################\n") - - page_size = 4 - spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') - ger_req_ctrl = GetEffectiveRightsControl(True, "dn: " + DN_DM) - - known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType: SimplePagedResultsControl, - } - - topology.standalone.log.info("Calling search_ext...") - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl, ger_req_ctrl]) - attrlevelrightscnt = 0 - pageddncnt = 0 - pages = 0 - while True: - pages += 1 - - topology.standalone.log.info("Getting page %d" % pages) - rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) - topology.standalone.log.info("%d results" % len(rdata)) - pageddncnt += len(rdata) - - topology.standalone.log.info("Results:") - for dn, attrs in rdata: - topology.standalone.log.info("dn: %s" % dn) - topology.standalone.log.info("attributeLevelRights: %s" % attrs['attributeLevelRights'][0]) - if attrs['attributeLevelRights'][0] != "": - attrlevelrightscnt += 1 - - pctrls = [ - c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType - ] - if not pctrls: - topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') - break - - if pctrls[0].cookie: - spr_req_ctrl.cookie = pctrls[0].cookie - topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl, ger_req_ctrl]) - else: - topology.standalone.log.info("No cookie") - break - - topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) - - assert dnnum == len(entries) - assert dnnum == attrlevelrightscnt - assert pages == (dnnum / page_size) - topology.standalone.log.info("ticket47664 was successfully verified.") - - -def test_ticket47664_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47664_run(topo) - - test_ticket47664_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47669_test.py b/dirsrvtests/tickets/ticket47669_test.py deleted file mode 100644 index 2ef1f3e..0000000 --- a/dirsrvtests/tickets/ticket47669_test.py +++ /dev/null @@ -1,265 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
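The search loop in the deleted ticket47664 test pairs the RFC 2696 simple paged results control with the Get Effective Rights control and counts attributeLevelRights per page. A minimal sketch of the same pattern with python-ldap, assuming an already-bound connection `conn`; the function name, `base_dn` and `authz_dn` are illustrative and not taken from the patch:

    import ldap
    from ldap.controls import SimplePagedResultsControl
    from ldap.controls.simple import GetEffectiveRightsControl

    def paged_search_with_ger(conn, base_dn, authz_dn, page_size=4):
        # One page per search_ext/result3 round trip; the returned cookie
        # feeds the next request until the server reports no more pages.
        spr = SimplePagedResultsControl(True, size=page_size, cookie='')
        ger = GetEffectiveRightsControl(True, "dn: " + authz_dn)
        known = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
        entries = []
        while True:
            msgid = conn.search_ext(base_dn, ldap.SCOPE_SUBTREE, '(objectclass=*)',
                                    ['cn'], serverctrls=[spr, ger])
            rtype, rdata, rmsgid, rctrls = conn.result3(msgid, resp_ctrl_classes=known)
            entries.extend(rdata)
            pctrls = [c for c in rctrls
                      if c.controlType == SimplePagedResultsControl.controlType]
            if not pctrls or not pctrls[0].cookie:
                break  # server ignored the control, or this was the last page
            spr.cookie = pctrls[0].cookie
        return entries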
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl -from ldap.controls.simple import GetEffectiveRightsControl - -log = logging.getLogger(__name__) - -installation_prefix = None - -CHANGELOG = 'cn=changelog5,cn=config' -RETROCHANGELOG = 'cn=Retro Changelog Plugin,cn=plugins,cn=config' - -MAXAGE = 'nsslapd-changelogmaxage' -TRIMINTERVAL = 'nsslapd-changelogtrim-interval' -COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval' - -FILTER = '(cn=*)' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47669_init(topology): - """ - Add cn=changelog5,cn=config - Enable cn=Retro Changelog Plugin,cn=plugins,cn=config - """ - log.info('Testing Ticket 47669 - Test duration syntax in the changelogs') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - try: - changelogdir = "%s/changelog" % topology.standalone.dbdir - topology.standalone.add_s(Entry((CHANGELOG, - {'objectclass': 'top extensibleObject'.split(), - 'nsslapd-changelogdir': changelogdir}))) - except ldap.LDAPError as e: - log.error('Failed to add ' + CHANGELOG + ': error ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')]) - except ldap.LDAPError as e: - log.error('Failed to enable ' + RETROCHANGELOG + ': error ' + e.message['desc']) - assert False - - # restart the server - topology.standalone.restart(timeout=10) - - -def add_and_check(topology, plugin, attr, val, isvalid): - """ - Helper function to add/replace attr: val and check the added value - """ - if isvalid: - log.info('Test %s: %s -- valid' % (attr, val)) - try: - topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)]) - except ldap.LDAPError as e: - log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc']) - assert False - else: - log.info('Test %s: %s -- invalid' % (attr, val)) - if plugin == CHANGELOG: - try: - topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)]) - except ldap.LDAPError as e: - log.error('Expectedly failed to add 
' + attr + ': ' + val + - ' to ' + plugin + ': error ' + e.message['desc']) - else: - try: - topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)]) - except ldap.LDAPError as e: - log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc']) - - try: - entries = topology.standalone.search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr]) - if isvalid: - if not entries[0].hasValue(attr, val): - log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) - assert False - else: - if plugin == CHANGELOG: - if entries[0].hasValue(attr, val): - log.fatal('%s has unexpected (%s: %s)' % (plugin, attr, val)) - assert False - else: - if not entries[0].hasValue(attr, val): - log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (plugin, e.message['desc'])) - assert False - - -def test_ticket47669_changelog_maxage(topology): - """ - Test nsslapd-changelogmaxage in cn=changelog5,cn=config - """ - log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - add_and_check(topology, CHANGELOG, MAXAGE, '12345', True) - add_and_check(topology, CHANGELOG, MAXAGE, '10s', True) - add_and_check(topology, CHANGELOG, MAXAGE, '30M', True) - add_and_check(topology, CHANGELOG, MAXAGE, '12h', True) - add_and_check(topology, CHANGELOG, MAXAGE, '2D', True) - add_and_check(topology, CHANGELOG, MAXAGE, '4w', True) - add_and_check(topology, CHANGELOG, MAXAGE, '-123', False) - add_and_check(topology, CHANGELOG, MAXAGE, 'xyz', False) - - -def test_ticket47669_changelog_triminterval(topology): - """ - Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config - """ - log.info('2. Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12345', True) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '10s', True) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '30M', True) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12h', True) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '2D', True) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '4w', True) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, '-123', False) - add_and_check(topology, CHANGELOG, TRIMINTERVAL, 'xyz', False) - - -def test_ticket47669_changelog_compactdbinterval(topology): - """ - Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config - """ - log.info('3. 
Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12345', True) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '10s', True) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '30M', True) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12h', True) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '2D', True) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '4w', True) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '-123', False) - add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False) - - -def test_ticket47669_retrochangelog_maxage(topology): - """ - Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config - """ - log.info('4. Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - add_and_check(topology, RETROCHANGELOG, MAXAGE, '12345', True) - add_and_check(topology, RETROCHANGELOG, MAXAGE, '10s', True) - add_and_check(topology, RETROCHANGELOG, MAXAGE, '30M', True) - add_and_check(topology, RETROCHANGELOG, MAXAGE, '12h', True) - add_and_check(topology, RETROCHANGELOG, MAXAGE, '2D', True) - add_and_check(topology, RETROCHANGELOG, MAXAGE, '4w', True) - add_and_check(topology, RETROCHANGELOG, MAXAGE, '-123', False) - add_and_check(topology, RETROCHANGELOG, MAXAGE, 'xyz', False) - - topology.standalone.log.info("ticket47669 was successfully verified.") - - -def test_ticket47669_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - """ - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - """ - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47669_init(topo) - test_ticket47669_changelog_maxage(topo) - test_ticket47669_changelog_triminterval(topo) - test_ticket47669_changelog_compactdbinterval(topo) - test_ticket47669_retrochangelog_maxage(topo) - test_ticket47669_final(topo) - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47676_test.py b/dirsrvtests/tickets/ticket47676_test.py deleted file mode 100644 index 22c2994..0000000 --- a/dirsrvtests/tickets/ticket47676_test.py +++ /dev/null @@ -1,406 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. 
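The add_and_check() helper driving the ticket47669 duration-syntax cases follows a modify-then-read-back pattern for each value. A condensed sketch of that idea, assuming a bound lib389 DirSrv instance `conn` (so search_s returns Entry objects with hasValue(), as in the test); the helper name is illustrative:

    import ldap

    CHANGELOG = 'cn=changelog5,cn=config'

    def set_and_verify(conn, dn, attr, value):
        # Replace the attribute, then confirm the value actually landed
        conn.modify_s(dn, [(ldap.MOD_REPLACE, attr, value)])
        entry = conn.search_s(dn, ldap.SCOPE_BASE, '(cn=*)', [attr])[0]
        assert entry.hasValue(attr, value)

    # e.g. a valid duration such as '4w' (four weeks) should be accepted:
    # set_and_verify(conn, CHANGELOG, 'nsslapd-changelogmaxage', '4w')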
We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -SCHEMA_DN = "cn=schema" -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -OC_NAME = 'OCticket47676' -OC_OID_EXT = 2 -MUST = "(postalAddress $ postalCode)" -MAY = "(member $ street)" - -OC2_NAME = 'OC2ticket47676' -OC2_OID_EXT = 3 -MUST_2 = "(postalAddress $ postalCode)" -MAY_2 = "(member $ street)" - -REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" -REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 10 - -BIND_NAME = 'bind_entry' -BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) -BIND_PW = 'password' - -ENTRY_NAME = 'test_entry' -ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) -ENTRY_OC = "top person %s" % OC_NAME - -BASE_OID = "1.2.3.4.5.6.7.8.9.10" - - -def _oc_definition(oid_ext, name, must=None, may=None): - oid = "%s.%d" % (BASE_OID, oid_ext) - desc = 'To test ticket 47490' - sup = 'person' - if not must: - must = MUST - if not may: - may = MAY - - new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) - return new_oc - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. - ''' - global installation1_prefix - global installation2_prefix - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: 
- log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master1.clearTmpDir(__file__) - - # Here we have two instances master and consumer - # with replication working. - return TopologyMaster1Master2(master1, master2) - - -def test_ticket47676_init(topology): - """ - It adds - - Objectclass with MAY 'member' - - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation - It deletes the anonymous aci - - """ - - topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME) - new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY) - topology.master1.schema.add_schema('objectClasses', new_oc) - - # entry used to bind with - topology.master1.log.info("Add %s" % BIND_DN) - topology.master1.add_s(Entry((BIND_DN, { - 'objectclass': "top person".split(), - 'sn': BIND_NAME, - 'cn': BIND_NAME, - 'userpassword': BIND_PW}))) - - # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL - topology.master1.modify_s(DN_CONFIG, mod) - topology.master2.modify_s(DN_CONFIG, mod) - - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - -def test_ticket47676_skip_oc_at(topology): - ''' - This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated - on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2. 
- If the schema has successfully been pushed, updating Master2 should succeed - ''' - topology.master1.log.info("\n\n######################### ADD ######################\n") - - # bind as 'cn=Directory manager' - topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - - # Prepare the entry with multivalued members - entry = Entry(ENTRY_DN) - entry.setValues('objectclass', 'top', 'person', 'OCticket47676') - entry.setValues('sn', ENTRY_NAME) - entry.setValues('cn', ENTRY_NAME) - entry.setValues('postalAddress', 'here') - entry.setValues('postalCode', '1234') - members = [] - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - members.append("cn=%s,%s" % (name, SUFFIX)) - members.append(BIND_DN) - entry.setValues('member', members) - - topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN) - topology.master1.add_s(entry) - - # - # Now check the entry as been replicated - # - topology.master2.simple_bind_s(DN_DM, PASSWORD) - topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN) - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(2) - loop += 1 - assert loop <= 10 - - # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2) - topology.master1.log.info("Update %s on M2" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'description', 'test_add')] - topology.master2.modify_s(ENTRY_DN, mod) - - topology.master1.simple_bind_s(DN_DM, PASSWORD) - loop = 0 - while loop <= 10: - ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'): - break - time.sleep(1) - loop += 1 - - assert ent.getValue('description') == 'test_add' - - -def test_ticket47676_reject_action(topology): - - topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n") - - topology.master1.simple_bind_s(DN_DM, PASSWORD) - topology.master2.simple_bind_s(DN_DM, PASSWORD) - - # make master1 to refuse to push the schema if OC_NAME is present in consumer schema - mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL - topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) - - # Restart is required to take into account that policy - topology.master1.stop(timeout=10) - topology.master1.start(timeout=10) - - # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema - topology.master1.log.info("Add %s on M1" % OC2_NAME) - new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY) - topology.master1.schema.add_schema('objectClasses', new_oc) - - # Safety checking that the schema has been updated on M1 - topology.master1.log.info("Check %s is in M1" % OC2_NAME) - ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) - assert ent.hasAttr('objectclasses') - found = False - for objectclass in ent.getValues('objectclasses'): - if str(objectclass).find(OC2_NAME) >= 0: - found = True - break - assert found - - # Do an update of M1 so that M1 will try to push the schema - topology.master1.log.info("Update %s on M1" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')] - topology.master1.modify_s(ENTRY_DN, mod) - - # Check the replication occured and so also M1 attempted to push the schema - 
topology.master1.log.info("Check updated %s on M2" % ENTRY_DN) - loop = 0 - while loop <= 10: - ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) - if ent.hasAttr('description') and ent.getValue('description') == 'test_reject': - # update was replicated - break - time.sleep(2) - loop += 1 - assert loop <= 10 - - # Check that the schema has not been pushed - topology.master1.log.info("Check %s is not in M2" % OC2_NAME) - ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) - assert ent.hasAttr('objectclasses') - found = False - for objectclass in ent.getValues('objectclasses'): - if str(objectclass).find(OC2_NAME) >= 0: - found = True - break - assert not found - - topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n") - - # make master1 to do no specific action on OC_NAME - mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL - topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) - - # Restart is required to take into account that policy - topology.master1.stop(timeout=10) - topology.master1.start(timeout=10) - - # Do an update of M1 so that M1 will try to push the schema - topology.master1.log.info("Update %s on M1" % ENTRY_DN) - mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')] - topology.master1.modify_s(ENTRY_DN, mod) - - # Check the replication occured and so also M1 attempted to push the schema - topology.master1.log.info("Check updated %s on M2" % ENTRY_DN) - loop = 0 - while loop <= 10: - ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) - if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject': - # update was replicated - break - time.sleep(2) - loop += 1 - assert loop <= 10 - - # Check that the schema has been pushed - topology.master1.log.info("Check %s is in M2" % OC2_NAME) - ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) - assert ent.hasAttr('objectclasses') - found = False - for objectclass in ent.getValues('objectclasses'): - if str(objectclass).find(OC2_NAME) >= 0: - found = True - break - assert found - - -def test_ticket47676_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n") - test_ticket47676_init(topo) - - test_ticket47676_skip_oc_at(topo) - test_ticket47676_reject_action(topo) - - test_ticket47676_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47714_test.py b/dirsrvtests/tickets/ticket47714_test.py deleted file mode 100644 index 268ddef..0000000 --- a/dirsrvtests/tickets/ticket47714_test.py +++ /dev/null @@ -1,263 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import shutil -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY -ACCT_POLICY_DN = 'cn=Account Inactivation Pplicy,%s' % SUFFIX -INACTIVITY_LIMIT = '9' -SEARCHFILTER = '(objectclass=*)' - -TEST_USER = 'ticket47714user' -TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX) -TEST_USER_PW = '%s' % TEST_USER - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - - -def test_ticket47714_init(topology): - """ - 1. Add account policy entry to the DB - 2. Add a test user to the DB - """ - _header(topology, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) - topology.standalone.add_s(Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), - 'accountInactivityLimit': INACTIVITY_LIMIT}))) - - log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) - topology.standalone.add_s(Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': TEST_USER, - 'sn': TEST_USER, - 'givenname': TEST_USER, - 'userPassword': TEST_USER_PW, - 'acctPolicySubentry': ACCT_POLICY_DN}))) - - -def test_ticket47714_run_0(topology): - """ - Check this change has no inpact to the existing functionality. - 1. Set account policy config without the new attr alwaysRecordLoginAttr - 2. Bind as a test user - 3. Bind as the test user again and check the lastLoginTime is updated - 4. 
Waint longer than the accountInactivityLimit time and bind as the test user, - which should fail with CONSTANT_VIOLATION. - """ - _header(topology, 'Account Policy - No new attr alwaysRecordLoginAttr in config') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - # Modify Account Policy config entry - topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), - (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'), - (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'), - (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'), - (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')]) - - # Enable the plugins - topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) - - topology.standalone.restart(timeout=120) - - log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) - try: - topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) - except ldap.CONSTRAINT_VIOLATION as e: - log.error('CONSTRAINT VIOLATION ' + e.message['desc']) - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) - - lastLoginTime0 = entry[0].lastLoginTime - - time.sleep(2) - - log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) - try: - topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) - except ldap.CONSTRAINT_VIOLATION as e: - log.error('CONSTRAINT VIOLATION ' + e.message['desc']) - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) - - lastLoginTime1 = entry[0].lastLoginTime - - log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) - assert lastLoginTime0 < lastLoginTime1 - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - entry = topology.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER) - log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN) - log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit) - log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN) - - time.sleep(10) - - log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN) - try: - topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) - except ldap.CONSTRAINT_VIOLATION as e: - log.info('CONSTRAINT VIOLATION ' + e.message['desc']) - log.info("%s was successfully inactivated." % TEST_USER_DN) - pass - - -def test_ticket47714_run_1(topology): - """ - Verify a new config attr alwaysRecordLoginAttr - 1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime - Note: bogus attr is set to stateattrname. - altstateattrname type value is used for checking whether the account is idle or not. - 2. Bind as a test user - 3. 
Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated - """ - _header(topology, 'Account Policy - With new attr alwaysRecordLoginAttr in config') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)]) - - # Modify Account Policy config entry - topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), - (ldap.MOD_REPLACE, 'stateattrname', 'bogus'), - (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'), - (ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'), - (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'), - (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')]) - - # Enable the plugins - topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) - - topology.standalone.restart(timeout=120) - - log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) - try: - topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) - except ldap.CONSTRAINT_VIOLATION as e: - log.error('CONSTRAINT VIOLATION ' + e.message['desc']) - - time.sleep(1) - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) - lastLoginTime0 = entry[0].lastLoginTime - - log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) - try: - topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) - except ldap.CONSTRAINT_VIOLATION as e: - log.error('CONSTRAINT VIOLATION ' + e.message['desc']) - - time.sleep(1) - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) - lastLoginTime1 = entry[0].lastLoginTime - - log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) - assert lastLoginTime0 < lastLoginTime1 - - topology.standalone.log.info("ticket47714 was successfully verified.") - - -def test_ticket47714_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47714_init(topo) - - test_ticket47714_run_0(topo) - - test_ticket47714_run_1(topo) - - test_ticket47714_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47721_test.py b/dirsrvtests/tickets/ticket47721_test.py deleted file mode 100644 index 7841423..0000000 --- a/dirsrvtests/tickets/ticket47721_test.py +++ /dev/null @@ -1,468 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
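Both ticket47714 runs verify that lastLoginTime moves forward between two binds of the test user. A compact sketch of that check, assuming a lib389 DirSrv instance `inst`; the helper name is illustrative and the credentials would be the test's constants:

    import time
    import ldap

    def last_login_advances(inst, user_dn, user_pw, admin_dn, admin_pw):
        # Bind as the user, then re-bind as the admin to read lastLoginTime back
        def read_last_login():
            inst.simple_bind_s(user_dn, user_pw)
            inst.simple_bind_s(admin_dn, admin_pw)
            entry = inst.search_s(user_dn, ldap.SCOPE_BASE,
                                  '(objectclass=*)', ['lastLoginTime'])[0]
            return entry.lastLoginTime

        first = read_last_login()
        time.sleep(2)
        second = read_last_login()
        return first < second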
-# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389._constants import REPLICAROLE_MASTER - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -SCHEMA_DN = "cn=schema" -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -OC_NAME = 'OCticket47721' -OC_OID_EXT = 2 -MUST = "(postalAddress $ postalCode)" -MAY = "(member $ street)" - -OC2_NAME = 'OC2ticket47721' -OC2_OID_EXT = 3 -MUST_2 = "(postalAddress $ postalCode)" -MAY_2 = "(member $ street)" - -REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" -REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 10 - -BIND_NAME = 'bind_entry' -BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) -BIND_PW = 'password' - -ENTRY_NAME = 'test_entry' -ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) -ENTRY_OC = "top person %s" % OC_NAME - -BASE_OID = "1.2.3.4.5.6.7.8.9.10" - -SLEEP_INTERVAL = 60 - -def _add_custom_at_definition(name='ATticket47721'): - new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (name, name) - return new_at - - -def _chg_std_at_defintion(): - new_at = "( 2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'Netscape Directory Server' )" - return new_at - - -def _add_custom_oc_defintion(name='OCticket47721'): - new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (name, name) - return new_oc - - -def _chg_std_oc_defintion(): - new_oc = "( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY ( accessTo $ ou ) X-ORIGIN 'nss_ldap/pam_ldap' )" - return new_oc - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. 
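The _add_custom_at_definition() and _add_custom_oc_defintion() helpers above return raw schema definition strings that lib389 then feeds to schema.add_schema(). A minimal sketch of adding such definitions, assuming a lib389 DirSrv instance `inst`; the OIDs and names below are illustrative only:

    # An attribute type definition in the same string form the helpers build
    new_at = ("( ATexample-oid NAME 'ATexample' DESC 'illustrative attribute' "
              "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )")
    inst.schema.add_schema('attributetypes', new_at)

    # Object classes follow the same pattern under the 'objectClasses' key
    new_oc = ("( OCexample-oid NAME 'OCexample' DESC 'illustrative objectclass' "
              "SUP top AUXILIARY MAY description )")
    inst.schema.add_schema('objectClasses', new_oc)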
- ''' - global installation1_prefix - global installation2_prefix - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master1.clearTmpDir(__file__) - - # - # Here we have two instances master and consumer - # with replication working. 
Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology - return TopologyMaster1Master2(master1, master2) - - -def test_ticket47721_init(topology): - """ - It adds - - Objectclass with MAY 'member' - - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation - It deletes the anonymous aci - - """ - - # entry used to bind with - topology.master1.log.info("Add %s" % BIND_DN) - topology.master1.add_s(Entry((BIND_DN, { - 'objectclass': "top person".split(), - 'sn': BIND_NAME, - 'cn': BIND_NAME, - 'userpassword': BIND_PW}))) - - # enable repl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL logging - topology.master1.modify_s(DN_CONFIG, mod) - topology.master2.modify_s(DN_CONFIG, mod) - - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - -def test_ticket47721_0(topology): - dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - loop = 0 - ent = None - while loop <= 10: - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - -def test_ticket47721_1(topology): - log.info('Running test 1...') - #topology.master1.log.info("Attach debugger\n\n") - #time.sleep(30) - - new = _add_custom_at_definition() - topology.master1.log.info("Add (M2) %s " % new) - topology.master2.schema.add_schema('attributetypes', new) - - new = _chg_std_at_defintion() - topology.master1.log.info("Chg (M2) %s " % new) - topology.master2.schema.add_schema('attributetypes', new) - - new = _add_custom_oc_defintion() - topology.master1.log.info("Add (M2) %s " % new) - topology.master2.schema.add_schema('objectClasses', new) - - new = _chg_std_oc_defintion() - topology.master1.log.info("Chg (M2) %s " % new) - topology.master2.schema.add_schema('objectClasses', new) - - mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 1')] - dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - topology.master2.modify_s(dn, mod) - - loop = 0 - while loop <= 10: - try: - ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 1'): - break - except ldap.NO_SUCH_OBJECT: - loop += 1 - time.sleep(1) - assert loop <= 10 - - time.sleep(2) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) - log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) - - -def test_ticket47721_2(topology): - log.info('Running test 2...') - - mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 2')] - dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - topology.master1.modify_s(dn, mod) - - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 2'): - break - except ldap.NO_SUCH_OBJECT: - loop += 1 - time.sleep(1) - assert loop <= 10 - - time.sleep(2) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) - log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) - if schema_csn_master1 != schema_csn_master2: - # We need to 
give the server a little more time, then check it again - log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' - % (schema_csn_master1, schema_csn_master2)) - time.sleep(SLEEP_INTERVAL) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - - assert schema_csn_master1 is not None - assert schema_csn_master1 == schema_csn_master2 - - -def test_ticket47721_3(topology): - ''' - Check that the supplier can update its schema from consumer schema - Update M2 schema, then trigger a replication M1->M2 - ''' - log.info('Running test 3...') - - # stop RA M2->M1, so that M1 can only learn being a supplier - ents = topology.master2.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master2.agreement.pause(ents[0].dn) - - new = _add_custom_at_definition('ATtest3') - topology.master1.log.info("Update schema (M2) %s " % new) - topology.master2.schema.add_schema('attributetypes', new) - - new = _add_custom_oc_defintion('OCtest3') - topology.master1.log.info("Update schema (M2) %s " % new) - topology.master2.schema.add_schema('objectClasses', new) - - mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 3')] - dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - topology.master1.modify_s(dn, mod) - - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 3'): - break - except ldap.NO_SUCH_OBJECT: - loop += 1 - time.sleep(1) - assert loop <= 10 - - time.sleep(2) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) - log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) - if schema_csn_master1 == schema_csn_master2: - # We need to give the server a little more time, then check it again - log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' - % (schema_csn_master1, schema_csn_master2)) - time.sleep(SLEEP_INTERVAL) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - - assert schema_csn_master1 is not None - # schema csn on M2 is larger that on M1. M1 only took the new definitions - assert schema_csn_master1 != schema_csn_master2 - - -def test_ticket47721_4(topology): - ''' - Here M2->M1 agreement is disabled. - with test_ticket47721_3, M1 schema and M2 should be identical BUT - the nsschemacsn is M2>M1. But as the RA M2->M1 is disabled, M1 keeps its schemacsn. - Update schema on M2 (nsschemaCSN update), update M2. 
Check they have the same schemacsn - ''' - log.info('Running test 4...') - - new = _add_custom_at_definition('ATtest4') - topology.master1.log.info("Update schema (M1) %s " % new) - topology.master1.schema.add_schema('attributetypes', new) - - new = _add_custom_oc_defintion('OCtest4') - topology.master1.log.info("Update schema (M1) %s " % new) - topology.master1.schema.add_schema('objectClasses', new) - - topology.master1.log.info("trigger replication M1->M2: to update the schema") - mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 4')] - dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - topology.master1.modify_s(dn, mod) - - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 4'): - break - except ldap.NO_SUCH_OBJECT: - loop += 1 - time.sleep(1) - assert loop <= 10 - - topology.master1.log.info("trigger replication M1->M2: to push the schema") - mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 5')] - dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - topology.master1.modify_s(dn, mod) - - loop = 0 - while loop <= 10: - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 5'): - break - except ldap.NO_SUCH_OBJECT: - loop += 1 - time.sleep(1) - assert loop <= 10 - - time.sleep(2) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) - log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) - if schema_csn_master1 != schema_csn_master2: - # We need to give the server a little more time, then check it again - log.info('Schema CSNs are incorrectly in sync, wait a little...') - time.sleep(SLEEP_INTERVAL) - schema_csn_master1 = topology.master1.schema.get_schema_csn() - schema_csn_master2 = topology.master2.schema.get_schema_csn() - - assert schema_csn_master1 is not None - assert schema_csn_master1 == schema_csn_master2 - - -def test_ticket47721_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - topo.master1.log.info("\n\n######################### Ticket 47721 ######################\n") - test_ticket47721_init(topo) - - test_ticket47721_0(topo) - test_ticket47721_1(topo) - test_ticket47721_2(topo) - test_ticket47721_3(topo) - test_ticket47721_4(topo) - - test_ticket47721_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47781_test.py b/dirsrvtests/tickets/ticket47781_test.py deleted file mode 100644 index c52612e..0000000 --- a/dirsrvtests/tickets/ticket47781_test.py +++ /dev/null @@ -1,188 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
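Tests 2 through 4 decide pass or fail by comparing nsSchemaCSN on the two masters, re-reading once after SLEEP_INTERVAL when the values have not converged yet. A small sketch of that comparison, assuming two lib389 DirSrv instances; the helper name is illustrative:

    import time

    def schema_csns_converge(master1, master2, wait=60):
        # Read both schema CSNs; if they differ, wait once and re-check
        csn1 = master1.schema.get_schema_csn()
        csn2 = master2.schema.get_schema_csn()
        if csn1 != csn2:
            time.sleep(wait)
            csn1 = master1.schema.get_schema_csn()
            csn2 = master2.schema.get_schema_csn()
        return csn1 is not None and csn1 == csn2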
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47781(topology): - """ - Testing for a deadlock after doing an online import of an LDIF with - replication data. The replication agreement should be invalid. - """ - - log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data') - - # - # Setup Replication - # - log.info('Setting up replication...') - topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER_1) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - # The agreement should point to a server that does NOT exist (invalid port) - repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, - host=topology.standalone.host, - port=5555, - properties=properties) - - # - # add two entries - # - log.info('Adding two entries...') - try: - topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'entry1'}))) - except ldap.LDAPError as e: - log.error('Failed to add entry 1: ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'entry2'}))) - except ldap.LDAPError as e: - log.error('Failed to add entry 2: ' + e.message['desc']) - assert False - - # - # export the replication ldif - # - log.info('Exporting replication ldif...') - args = {EXPORT_REPL_INFO: True} - exportTask = Tasks(topology.standalone) - try: - exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) - except ValueError: - assert False - - # - # Restart the server - # - log.info('Restarting server...') - topology.standalone.stop(timeout=5) - topology.standalone.start(timeout=5) - - # - # Import the ldif - # - log.info('Import 
replication LDIF file...') - importTask = Tasks(topology.standalone) - args = {TASK_WAIT: True} - try: - importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) - os.remove("/tmp/export.ldif") - except ValueError: - os.remove("/tmp/export.ldif") - assert False - - # - # Search for tombstones - we should not hang/timeout - # - log.info('Search for tombstone entries(should find one and not hang)...') - topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5) - topology.standalone.set_option(ldap.OPT_TIMEOUT, 5) - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone') - if not entries: - log.fatal('Search failed to find any entries.') - assert PR_False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert PR_False - - -def test_ticket47781_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47781(topo) - test_ticket47781_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47787_test.py b/dirsrvtests/tickets/ticket47787_test.py deleted file mode 100644 index 305ec75..0000000 --- a/dirsrvtests/tickets/ticket47787_test.py +++ /dev/null @@ -1,561 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on April 14, 2014 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -import re -from lib389 import DirSrv, Entry, tools, NoSuchEntryError -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389._constants import REPLICAROLE_MASTER - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. 
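test_ticket47781 exercises an export of the suffix with replication data followed by an online import and a tombstone search. A minimal sketch of that export/import round trip, assuming a standalone lib389 instance `inst`; the constants come from the same lib389 star imports the test file uses, and the LDIF path matches the temporary file in the test:

    from lib389.tasks import Tasks
    # DEFAULT_SUFFIX, EXPORT_REPL_INFO and TASK_WAIT are provided by the
    # lib389 star imports used in the test (lib389._constants, lib389.tasks,
    # lib389.properties).
    from lib389._constants import *
    from lib389.tasks import *
    from lib389.properties import *

    def export_import_with_repl_data(inst, ldif_path="/tmp/export.ldif"):
        # Export the suffix including replication metadata, restart the
        # server, then re-import the same file as an online task.
        tasks = Tasks(inst)
        tasks.exportLDIF(DEFAULT_SUFFIX, None, ldif_path, {EXPORT_REPL_INFO: True})
        inst.stop(timeout=5)
        inst.start(timeout=5)
        tasks.importLDIF(DEFAULT_SUFFIX, None, ldif_path, {TASK_WAIT: True})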
We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -# set this flag to False so that it will assert on failure _status_entry_both_server -DEBUG_FLAG = False - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX - -STAGING_CN = "staged user" -PRODUCTION_CN = "accounts" -EXCEPT_CN = "excepts" - -STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) -PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) -PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) - -STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) -PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) -BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) -BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) - -BIND_CN = "bind_entry" -BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) -BIND_PW = "password" - -NEW_ACCOUNT = "new_account" -MAX_ACCOUNTS = 20 - -CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. - ''' - global installation1_prefix - global installation2_prefix - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master1.clearTmpDir(__file__) - - # Here we have two instances master and consumer - # with replication working. - return TopologyMaster1Master2(master1, master2) - - -def _bind_manager(server): - server.log.info("Bind as %s " % DN_DM) - server.simple_bind_s(DN_DM, PASSWORD) - - -def _bind_normal(server): - server.log.info("Bind as %s " % BIND_DN) - server.simple_bind_s(BIND_DN, BIND_PW) - - -def _header(topology, label): - topology.master1.log.info("\n\n###############################################") - topology.master1.log.info("#######") - topology.master1.log.info("####### %s" % label) - topology.master1.log.info("#######") - topology.master1.log.info("###############################################") - - -def _status_entry_both_server(topology, name=None, desc=None, debug=True): - if not name: - return - topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n") - attr = 'description' - found = False - attempt = 0 - while not found and attempt < 10: - ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name) - if attr in ent_m1.getAttrs(): - found = True - else: - time.sleep(1) - attempt = attempt + 1 - assert ent_m1 - - topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n") - ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name) - assert ent_m2 - - topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc) - topology.master1.log.info("M1 only\n") - for attr in ent_m1.getAttrs(): - - if not debug: - assert attr in ent_m2.getAttrs() - - if not attr in ent_m2.getAttrs(): - topology.master1.log.info(" %s" % attr) - for val in ent_m1.getValues(attr): - topology.master1.log.info(" %s" % val) - - topology.master1.log.info("M2 only\n") - for attr in ent_m2.getAttrs(): - - if not debug: - assert attr in ent_m1.getAttrs() - - if not attr in ent_m1.getAttrs(): - topology.master1.log.info(" %s" % attr) - for val in ent_m2.getValues(attr): - topology.master1.log.info(" %s" % val) - - topology.master1.log.info("M1 differs M2\n") - - if not debug: - assert ent_m1.dn == ent_m2.dn - - if ent_m1.dn != ent_m2.dn: - topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn)) - - for attr1 in ent_m1.getAttrs(): - if attr1 in ent_m2.getAttrs(): - for val1 in ent_m1.getValues(attr1): - found = False - for val2 in ent_m2.getValues(attr1): - if val1 == val2: - found = True - break - - if not debug: - assert found - - if not found: - topology.master1.log.info(" M1[%s] = %s" % (attr1, val1)) - - for attr2 in ent_m2.getAttrs(): - if attr2 in ent_m1.getAttrs(): - for val2 in ent_m2.getValues(attr2): - found = False - for val1 in ent_m1.getValues(attr2): - if val2 == val1: - found = True - break - - if not debug: - assert found - - if not found: - topology.master1.log.info(" M2[%s] = %s" % (attr2, val2)) - - -def _pause_RAs(topology): - topology.master1.log.info("\n\n######################### Pause RA M1<->M2 
######################\n") - ents = topology.master1.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master1.agreement.pause(ents[0].dn) - - ents = topology.master2.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master2.agreement.pause(ents[0].dn) - - -def _resume_RAs(topology): - topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n") - ents = topology.master1.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master1.agreement.resume(ents[0].dn) - - ents = topology.master2.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master2.agreement.resume(ents[0].dn) - - -def _find_tombstone(instance, base, attr, value): - # - # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because - # tombstone are not index in 'sn' so 'sn=name' will return NULL - # and even if tombstone are indexed for objectclass the '&' will set - # the candidate list to NULL - # - filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE - ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt) - #found = False - for ent in ents: - if ent.hasAttr(attr): - for val in ent.getValues(attr): - if val == value: - instance.log.debug("tombstone found: %r" % ent) - return ent - return None - - -def _delete_entry(instance, entry_dn, name): - instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name) - - # delete the entry - instance.delete_s(entry_dn) - assert _find_tombstone(instance, SUFFIX, 'sn', name) is not None - - -def _mod_entry(instance, entry_dn, attr, value): - instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn) - mod = [(ldap.MOD_REPLACE, attr, value)] - instance.modify_s(entry_dn, mod) - - -def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): - assert instance is not None - assert entry_dn is not None - - if not new_rdn: - pattern = 'cn=(.*),(.*)' - rdnre = re.compile(pattern) - match = rdnre.match(entry_dn) - old_value = match.group(1) - new_rdn_val = "%s_modrdn" % old_value - new_rdn = "cn=%s" % new_rdn_val - - instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn) - if new_superior: - instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) - else: - instance.rename_s(entry_dn, new_rdn, delold=del_old) - - -def _check_entry_exists(instance, entry_dn): - loop = 0 - ent = None - while loop <= 10: - try: - ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - -def _check_mod_received(instance, base, filt, attr, value): - instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid) - loop = 0 - while loop <= 10: - ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt) - if ent.hasAttr(attr) and ent.getValue(attr) == value: - break - time.sleep(1) - loop += 1 - assert loop <= 10 - - -def _check_replication(topology, entry_dn): - # prepare the filter to retrieve the entry - filt = entry_dn.split(',')[0] - - topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n") - loop = 0 - while loop <= 10: - attr = 'description' - value = 'test_value_%d' % loop - mod = [(ldap.MOD_REPLACE, attr, value)] - topology.master1.modify_s(entry_dn, mod) - _check_mod_received(topology.master2, SUFFIX, filt, 
attr, value) - loop += 1 - - topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n") - loop = 0 - while loop <= 10: - attr = 'description' - value = 'test_value_%d' % loop - mod = [(ldap.MOD_REPLACE, attr, value)] - topology.master2.modify_s(entry_dn, mod) - _check_mod_received(topology.master1, SUFFIX, filt, attr, value) - loop += 1 - - -def test_ticket47787_init(topology): - """ - Creates - - a staging DIT - - a production DIT - - add accounts in staging DIT - - """ - - topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n") - - # entry used to bind with - topology.master1.log.info("Add %s" % BIND_DN) - topology.master1.add_s(Entry((BIND_DN, { - 'objectclass': "top person".split(), - 'sn': BIND_CN, - 'cn': BIND_CN, - 'userpassword': BIND_PW}))) - - # DIT for staging - topology.master1.log.info("Add %s" % STAGING_DN) - topology.master1.add_s(Entry((STAGING_DN, { - 'objectclass': "top organizationalRole".split(), - 'cn': STAGING_CN, - 'description': "staging DIT"}))) - - # DIT for production - topology.master1.log.info("Add %s" % PRODUCTION_DN) - topology.master1.add_s(Entry((PRODUCTION_DN, { - 'objectclass': "top organizationalRole".split(), - 'cn': PRODUCTION_CN, - 'description': "production DIT"}))) - - # enable replication error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')] - topology.master1.modify_s(DN_CONFIG, mod) - topology.master2.modify_s(DN_CONFIG, mod) - - # add dummy entries in the staging DIT - for cpt in range(MAX_ACCOUNTS): - name = "%s%d" % (NEW_ACCOUNT, cpt) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - -def test_ticket47787_2(topology): - ''' - Disable replication so that updates are not replicated - Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). - update a test entry on M2 - Reenable the RA. - checks that entry was deleted on M2 (with the modified RDN) - checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn) - ''' - - _header(topology, "test_ticket47787_2") - _bind_manager(topology.master1) - _bind_manager(topology.master2) - - #entry to test the replication is still working - name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1) - test_rdn = "cn=%s" % (name) - testentry_dn = "%s,%s" % (test_rdn, STAGING_DN) - - name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2) - test2_rdn = "cn=%s" % (name) - testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN) - - # value of updates to test the replication both ways - attr = 'description' - value = 'test_ticket47787_2' - - # entry for the modrdn - name = "%s%d" % (NEW_ACCOUNT, 1) - rdn = "cn=%s" % (name) - entry_dn = "%s,%s" % (rdn, STAGING_DN) - - # created on M1, wait the entry exists on M2 - _check_entry_exists(topology.master2, entry_dn) - _check_entry_exists(topology.master2, testentry_dn) - - _pause_RAs(topology) - - # Delete 'entry_dn' on M1. - # dummy update is only have a first CSN before the DEL - # else the DEL will be in min_csn RUV and make diagnostic a bit more complex - _mod_entry(topology.master1, testentry2_dn, attr, 'dummy') - _delete_entry(topology.master1, entry_dn, name) - _mod_entry(topology.master1, testentry2_dn, attr, value) - - time.sleep(1) # important to have MOD.csn != DEL.csn - - # MOD 'entry_dn' on M1. 
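The comment in _find_tombstone above carries the key constraint: tombstones are not indexed on 'sn', and adding that attribute to the filter would empty the candidate list, so the helper filters on the tombstone objectclass only and matches the value client-side. A minimal restatement of the same idea using plain python-ldap result tuples instead of lib389 Entry objects (str values assumed, as in the code above):

    import ldap

    def find_tombstone(conn, base, attr, value):
        # Server-side filter on the objectclass only; the attribute match is
        # done client-side because tombstones are not indexed on it.
        results = conn.search_s(base, ldap.SCOPE_SUBTREE, "(objectclass=nsTombstone)")
        for dn, attrs in results:
            if value in attrs.get(attr, []):
                return dn
        return None
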
- # dummy update is only have a first CSN before the MOD entry_dn - # else the DEL will be in min_csn RUV and make diagnostic a bit more complex - _mod_entry(topology.master2, testentry_dn, attr, 'dummy') - _mod_entry(topology.master2, entry_dn, attr, value) - _mod_entry(topology.master2, testentry_dn, attr, value) - - _resume_RAs(topology) - - topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n") - loop = 0 - while loop <= 10: - ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name) - if ent: - break - time.sleep(1) - loop += 1 - assert loop <= 10 - assert ent - - # the following checks are not necessary - # as this bug is only for failing replicated MOD (entry_dn) on M1 - #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value) - #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value) - # - #_check_replication(topology, testentry_dn) - - _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG) - - topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n") - loop = 0 - while loop <= 10: - ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name) - if ent: - break - time.sleep(1) - loop += 1 - assert loop <= 10 - assert ent - assert ent.hasAttr(attr) - assert ent.getValue(attr) == value - - -def test_ticket47787_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n") - test_ticket47787_init(topo) - - test_ticket47787_2(topo) - - test_ticket47787_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47808_test.py b/dirsrvtests/tickets/ticket47808_test.py deleted file mode 100644 index 4254c8c..0000000 --- a/dirsrvtests/tickets/ticket47808_test.py +++ /dev/null @@ -1,166 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config' -ENTRY_NAME = 'test_entry' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=True) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47808_run(topology): - """ - It enables attribute uniqueness plugin with sn as a unique attribute - Add an entry 1 with sn = ENTRY_NAME - Add an entry 2 with sn = ENTRY_NAME - If the second add does not crash the server and the following search found none, - the bug is fixed. - """ - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - topology.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n") - - # enable attribute uniqueness plugin - mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)] - topology.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod) - - topology.standalone.log.info("\n\n######################### ADD USER 1 ######################\n") - - # Prepare entry 1 - entry_name = '%s 1' % (ENTRY_NAME) - entry_dn_1 = 'cn=%s, %s' % (entry_name, SUFFIX) - entry_1 = Entry(entry_dn_1) - entry_1.setValues('objectclass', 'top', 'person') - entry_1.setValues('sn', ENTRY_NAME) - entry_1.setValues('cn', entry_name) - topology.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1)) - topology.standalone.add_s(entry_1) - - topology.standalone.log.info("\n\n######################### Restart Server ######################\n") - topology.standalone.stop(timeout=10) - topology.standalone.start(timeout=10) - - topology.standalone.log.info("\n\n######################### ADD USER 2 ######################\n") - - # Prepare entry 2 having the same sn, which crashes the server - entry_name = '%s 2' % (ENTRY_NAME) - entry_dn_2 = 'cn=%s, %s' % (entry_name, SUFFIX) - entry_2 = Entry(entry_dn_2) - entry_2.setValues('objectclass', 'top', 'person') - entry_2.setValues('sn', ENTRY_NAME) - entry_2.setValues('cn', entry_name) - topology.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2)) - try: - topology.standalone.add_s(entry_2) - except: - topology.standalone.log.warn("Adding %s failed" % entry_dn_2) - pass - - topology.standalone.log.info("\n\n######################### IS SERVER UP? 
######################\n") - ents = topology.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)') - assert len(ents) == 1 - topology.standalone.log.info("Yes, it's up.") - - topology.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n") - topology.standalone.log.info("Try to search %s" % entry_dn_2) - try: - ents = topology.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)') - except ldap.NO_SUCH_OBJECT: - topology.standalone.log.info("Found none") - - topology.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n") - - topology.standalone.log.info("Try to delete %s " % entry_dn_1) - topology.standalone.delete_s(entry_dn_1) - - -def test_ticket47808_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47808_run(topo) - - test_ticket47808_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47815_test.py b/dirsrvtests/tickets/ticket47815_test.py deleted file mode 100644 index 675e97b..0000000 --- a/dirsrvtests/tickets/ticket47815_test.py +++ /dev/null @@ -1,179 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47815(topology): - """ - Test betxn plugins reject an invalid option, and make sure that the rejected entry - is not in the entry cache. 
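Before the fix, the second add with a duplicate 'sn' could crash the server; the only setup the test needs is the legacy positional configuration of the attribute uniqueness plugin shown above. A hedged sketch of that configuration step ('conn' is assumed to be a connection bound as Directory Manager; the plugin only picks the change up after a restart):

    import ldap

    UNIQ_DN = "cn=attribute uniqueness,cn=plugins,cn=config"
    conn.modify_s(UNIQ_DN, [
        (ldap.MOD_REPLACE, "nsslapd-pluginEnabled", "on"),
        (ldap.MOD_REPLACE, "nsslapd-pluginarg0", "sn"),                 # attribute kept unique
        (ldap.MOD_REPLACE, "nsslapd-pluginarg1", "dc=example,dc=com"),  # subtree it applies to
    ])
    # Restart the instance afterwards; plugin configuration is read at startup.
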
- - Enable memberOf, automember, and retrocl plugins - Add the automember config entry - Add the automember group - Add a user that will be rejected by a betxn plugin - result error 53 - Attempt the same add again, and it should result in another error 53 (not error 68) - """ - result = 0 - result2 = 0 - - log.info('Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache') - - # Enabled the plugins - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) - topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - - # configure automember config entry - log.info('Adding automember config') - try: - topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { - 'objectclass': 'top autoMemberDefinition'.split(), - 'autoMemberScope': 'dc=example,dc=com', - 'autoMemberFilter': 'cn=user', - 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com', - 'autoMemberGroupingAttr': 'member:dn', - 'cn': 'group cfg'}))) - except: - log.error('Failed to add automember config') - exit(1) - - topology.standalone.stop(timeout=120) - time.sleep(1) - topology.standalone.start(timeout=120) - time.sleep(3) - - # need to reopen a connection toward the instance - topology.standalone.open() - - # add automember group - log.info('Adding automember group') - try: - topology.standalone.add_s(Entry(('cn=group,dc=example,dc=com', { - 'objectclass': 'top groupOfNames'.split(), - 'cn': 'group'}))) - except: - log.error('Failed to add automember group') - exit(1) - - # add user that should result in an error 53 - log.info('Adding invalid entry') - - try: - topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'user'}))) - except ldap.UNWILLING_TO_PERFORM: - log.debug('Adding invalid entry failed as expected') - result = 53 - except ldap.LDAPError as e: - log.error('Unexpected result ' + e.message['desc']) - assert False - if result == 0: - log.error('Add operation unexpectedly succeeded') - assert False - - # Attempt to add user again, should result in error 53 again - try: - topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'user'}))) - except ldap.UNWILLING_TO_PERFORM: - log.debug('2nd add of invalid entry failed as expected') - result2 = 53 - except ldap.LDAPError as e: - log.error('Unexpected result ' + e.message['desc']) - assert False - if result2 == 0: - log.error('2nd Add operation unexpectedly succeeded') - assert False - - -def test_ticket47815_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47815(topo) - test_ticket47815_final(topo) - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47819_test.py b/dirsrvtests/tickets/ticket47819_test.py deleted file mode 100644 index 435b36c..0000000 --- a/dirsrvtests/tickets/ticket47819_test.py +++ /dev/null @@ -1,296 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. 
-# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47819(topology): - """ - Testing precise tombstone purging: - [1] Make sure "nsTombstoneCSN" is added to new tombstones - [2] Make sure an import of a replication ldif adds "nsTombstoneCSN" - to old tombstones - [4] Test fixup task - [3] Make sure tombstone purging works - """ - - log.info('Testing Ticket 47819 - Test precise tombstone purging') - - # - # Setup Replication - # - log.info('Setting up replication...') - topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER_1) - - # - # Part 1 create a tombstone entry and make sure nsTombstoneCSN is added - # - log.info('Part 1: Add and then delete an entry to create a tombstone...') - - try: - topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'entry1'}))) - except ldap.LDAPError as e: - log.error('Failed to add entry: ' + e.message['desc']) - assert False - - try: - topology.standalone.delete_s('cn=entry1,dc=example,dc=com') - except ldap.LDAPError as e: - log.error('Failed to delete entry: ' + e.message['desc']) - assert False - - log.info('Search for tombstone entries...') - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') - if not entries: - log.fatal('Search failed to the new tombstone(nsTombstoneCSN is probably missing).') - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - log.info('Part 1 - passed') - - # - # Part 2 - import ldif with tombstones missing 'nsTombstoneCSN' - # - # First, export the replication ldif, edit the file(remove nstombstonecsn), - # and reimport it. 
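Part 1 above boils down to: delete an entry on a replica-enabled suffix and the resulting tombstone must already carry nsTombstoneCSN. A condensed, hedged equivalent with plain python-ldap ('conn' assumed bound as Directory Manager against dc=example,dc=com with replication enabled):

    import ldap
    import ldap.modlist

    dn = "cn=entry1,dc=example,dc=com"
    conn.add_s(dn, ldap.modlist.addModlist({
        "objectclass": ["top", "person"],
        "sn": ["user"],
        "cn": ["entry1"],
    }))
    conn.delete_s(dn)
    tombs = conn.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE,
                          "(&(nsTombstoneCSN=*)(objectclass=nsTombstone))")
    assert tombs, "tombstone created without nsTombstoneCSN"
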
- # - log.info('Part 2: Exporting replication ldif...') - - # Get the the full path and name for our LDIF we will be exporting - ldif_file = topology.standalone.getDir(__file__, TMP_DIR) + "export.ldif" - - args = {EXPORT_REPL_INFO: True, - TASK_WAIT: True} - exportTask = Tasks(topology.standalone) - try: - exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) - except ValueError: - assert False - - # open the ldif file, get the lines, then rewrite the file - ldif = open(ldif_file, "r") - lines = ldif.readlines() - ldif.close() - - ldif = open(ldif_file, "w") - for line in lines: - if not line.lower().startswith('nstombstonecsn'): - ldif.write(line) - ldif.close() - - # import the new ldif file - log.info('Import replication LDIF file...') - importTask = Tasks(topology.standalone) - args = {TASK_WAIT: True} - try: - importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) - os.remove(ldif_file) - except ValueError: - os.remove(ldif_file) - assert False - - # Search for the tombstone again - log.info('Search for tombstone entries...') - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') - if not entries: - log.fatal('Search failed to fine the new tombstone(nsTombstoneCSN is probably missing).') - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - log.info('Part 2 - passed') - - # - # Part 3 - test fixup task - # - log.info('Part 4: test the fixup task') - - # Run fixup task using the strip option. This removes nsTombstoneCSN - # so we can test if the fixup task works. - args = {TASK_WAIT: True, - TASK_TOMB_STRIP: True} - fixupTombTask = Tasks(topology.standalone) - try: - fixupTombTask.fixupTombstones(DEFAULT_BENAME, args) - except: - assert False - - # Search for tombstones with nsTombstoneCSN - better not find any - log.info('Search for tombstone entries...') - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') - if entries: - log.fatal('Search found tombstones with nsTombstoneCSN') - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - # Now run the fixup task - args = {TASK_WAIT: True} - fixupTombTask = Tasks(topology.standalone) - try: - fixupTombTask.fixupTombstones(DEFAULT_BENAME, args) - except: - assert False - - # Search for tombstones with nsTombstoneCSN - better find some - log.info('Search for tombstone entries...') - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') - if not entries: - log.fatal('Search did not find any fixed-up tombstones') - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - log.info('Part 3 - passed') - - # - # Part 4 - Test tombstone purging - # - log.info('Part 4: test tombstone purging...') - - args = {REPLICA_PRECISE_PURGING: 'on', - REPLICA_PURGE_DELAY: '5', - REPLICA_PURGE_INTERVAL: '5'} - try: - topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args) - except: - log.fatal('Failed to configure replica') - assert False - - # Wait for the interval to pass - log.info('Wait for tombstone purge interval to pass...') - time.sleep(6) - - # Add an entry to trigger replication - log.info('Perform an update to help trigger tombstone purging...') - try: - 
topology.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', { - 'objectclass': 'top person'.split(), - 'sn': 'user', - 'cn': 'entry1'}))) - except ldap.LDAPError as e: - log.error('Failed to add entry: ' + e.message['desc']) - assert False - - # Wait for the interval to pass again - log.info('Wait for tombstone purge interval to pass again...') - time.sleep(10) - - # search for tombstones, there should be none - log.info('Search for tombstone entries...') - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))') - if entries: - log.fatal('Search unexpectedly found tombstones') - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - log.info('Part 4 - passed') - - -def test_ticket47819_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47819(topo) - test_ticket47819_final(topo) - -if __name__ == '__main__': - run_isolated() \ No newline at end of file diff --git a/dirsrvtests/tickets/ticket47823_test.py b/dirsrvtests/tickets/ticket47823_test.py deleted file mode 100644 index 635827c..0000000 --- a/dirsrvtests/tickets/ticket47823_test.py +++ /dev/null @@ -1,1021 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import re -import shutil -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - - -log = logging.getLogger(__name__) - -installation_prefix = None - -PROVISIONING_CN = "provisioning" -PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) - -ACTIVE_CN = "accounts" -STAGE_CN = "staged users" -DELETE_CN = "deleted users" -ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) -STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) -DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) - -STAGE_USER_CN = "stage guy" -STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) - -ACTIVE_USER_CN = "active guy" -ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) - -ACTIVE_USER_1_CN = "test_1" -ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN) -ACTIVE_USER_2_CN = "test_2" -ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN) - -STAGE_USER_1_CN = ACTIVE_USER_1_CN -STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN) -STAGE_USER_2_CN = ACTIVE_USER_2_CN -STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN) - -ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', - 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees'] - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. 
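Part 4 above is mostly about timing: precise purging honours both the purge delay and the purge interval, and an incoming update helps trigger a purge pass, so the test sleeps past the interval, writes a dummy entry, then sleeps again before checking that the tombstones are gone. The sketch below only restates that timeline with the same values the test configures; the write step is left as a comment:

    import time

    purge_delay = 5      # seconds a tombstone must age before it is eligible
    purge_interval = 5   # seconds between purge passes

    time.sleep(purge_delay + 1)        # let the tombstone age past the delay
    # ...perform any write on the suffix here so the purge thread has a trigger...
    time.sleep(purge_interval * 2)     # allow at least one purge pass to run
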
- ''' - global installation_prefix - - - - standalone = DirSrv(verbose=False) - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - - -def _uniqueness_config_entry(topology, name=None): - if not name: - return None - - ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, - "(objectclass=nsSlapdPlugin)", - ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc', - 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type', - 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor', - 'nsslapd-pluginDescription']) - ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN) - return ent - - -def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False): - assert topology - assert attr_name - assert subtree_1 - - if type_config == 'old': - # enable the 'cn' uniqueness on Active - config = _uniqueness_config_entry(topology, attr_name) - config.setValue('nsslapd-pluginarg0', attr_name) - config.setValue('nsslapd-pluginarg1', subtree_1) - if subtree_2: - config.setValue('nsslapd-pluginarg2', subtree_2) - else: - # prepare the config entry - config = _uniqueness_config_entry(topology, attr_name) - config.setValue('uniqueness-attribute-name', attr_name) - config.setValue('uniqueness-subtrees', subtree_1) - if subtree_2: - config.setValue('uniqueness-subtrees', subtree_2) - if across_subtrees: - config.setValue('uniqueness-across-all-subtrees', 'on') - return config - - -def _active_container_invalid_cfg_add(topology): - ''' - Check uniqueness is not enforced with ADD (invalid config) - ''' - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': ACTIVE_USER_1_CN}))) - - topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_2_CN, - 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) - - topology.standalone.delete_s(ACTIVE_USER_1_DN) - topology.standalone.delete_s(ACTIVE_USER_2_DN) - - -def _active_container_add(topology, type_config='old'): - ''' - Check uniqueness in a single container (Active) - Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value - - ''' - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) - - # remove the 
'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.restart(timeout=120) - - topology.standalone.log.info('Uniqueness not enforced: create the entries') - - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': ACTIVE_USER_1_CN}))) - - topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_2_CN, - 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) - - topology.standalone.delete_s(ACTIVE_USER_1_DN) - topology.standalone.delete_s(ACTIVE_USER_2_DN) - - - topology.standalone.log.info('Uniqueness enforced: checks second entry is rejected') - - # enable the 'cn' uniqueness on Active - topology.standalone.add_s(config) - topology.standalone.restart(timeout=120) - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': ACTIVE_USER_1_CN}))) - - try: - topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_2_CN, - 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) - except ldap.CONSTRAINT_VIOLATION: - # yes it is expected - pass - - # cleanup the stuff now - topology.standalone.delete_s(config.dn) - topology.standalone.delete_s(ACTIVE_USER_1_DN) - - -def _active_container_mod(topology, type_config='old'): - ''' - Check uniqueness in a single container (active) - Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value - - ''' - - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) - - # enable the 'cn' uniqueness on Active - topology.standalone.add_s(config) - topology.standalone.restart(timeout=120) - - topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected') - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': ACTIVE_USER_1_CN}))) - - topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_2_CN, - 'cn': ACTIVE_USER_2_CN}))) - - try: - topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)]) - except ldap.CONSTRAINT_VIOLATION: - # yes it is expected - pass - - topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected') - try: - topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])]) - except ldap.CONSTRAINT_VIOLATION: - # yes it is expected - pass - - # cleanup the stuff now - topology.standalone.delete_s(config.dn) - topology.standalone.delete_s(ACTIVE_USER_1_DN) - topology.standalone.delete_s(ACTIVE_USER_2_DN) - - -def _active_container_modrdn(topology, type_config='old'): - ''' - Check uniqueness in a single container - Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value - - ''' - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) - - # enable the 'cn' uniqueness on Active - topology.standalone.add_s(config) - topology.standalone.restart(timeout=120) - - topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected') - - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, 
- 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) - - topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_2_CN, - 'cn': ACTIVE_USER_2_CN}))) - - try: - topology.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0) - except ldap.CONSTRAINT_VIOLATION: - # yes it is expected - pass - - # cleanup the stuff now - topology.standalone.delete_s(config.dn) - topology.standalone.delete_s(ACTIVE_USER_1_DN) - topology.standalone.delete_s(ACTIVE_USER_2_DN) - - -def _active_stage_containers_add(topology, type_config='old', across_subtrees=False): - ''' - Check uniqueness in several containers - Add an entry on a container with a given 'cn' - with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container - with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container - - ''' - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) - - topology.standalone.add_s(config) - topology.standalone.restart(timeout=120) - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': ACTIVE_USER_1_CN}))) - try: - - # adding an entry on a separated contains with the same 'cn' - topology.standalone.add_s(Entry((STAGE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': STAGE_USER_1_CN, - 'cn': ACTIVE_USER_1_CN}))) - except ldap.CONSTRAINT_VIOLATION: - assert across_subtrees - - # cleanup the stuff now - topology.standalone.delete_s(config.dn) - topology.standalone.delete_s(ACTIVE_USER_1_DN) - topology.standalone.delete_s(STAGE_USER_1_DN) - - -def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False): - ''' - Check uniqueness in a several containers - Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container - - ''' - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) - - topology.standalone.add_s(config) - topology.standalone.restart(timeout=120) - # adding an entry on active with a different 'cn' - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': ACTIVE_USER_2_CN}))) - - # adding an entry on a stage with a different 'cn' - topology.standalone.add_s(Entry((STAGE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': STAGE_USER_1_CN, - 'cn': STAGE_USER_1_CN}))) - - try: - - # modify add same value - topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])]) - except ldap.CONSTRAINT_VIOLATION: - assert across_subtrees - - topology.standalone.delete_s(STAGE_USER_1_DN) - topology.standalone.add_s(Entry((STAGE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': STAGE_USER_1_CN, - 'cn': STAGE_USER_2_CN}))) - try: - # modify replace same value - topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])]) - except ldap.CONSTRAINT_VIOLATION: - assert across_subtrees - - # cleanup the stuff now - topology.standalone.delete_s(config.dn) - topology.standalone.delete_s(ACTIVE_USER_1_DN) - topology.standalone.delete_s(STAGE_USER_1_DN) - - -def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False): - ''' - Check uniqueness in a several containers - Add and entry with a 
given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container - - ''' - - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) - - # enable the 'cn' uniqueness on Active and Stage - topology.standalone.add_s(config) - topology.standalone.restart(timeout=120) - topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': ACTIVE_USER_1_CN, - 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) - - topology.standalone.add_s(Entry((STAGE_USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': STAGE_USER_1_CN, - 'cn': STAGE_USER_1_CN}))) - - try: - - topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0) - - # check stage entry has 'cn=dummy' - stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn']) - assert stage_ent.hasAttr('cn') - found = False - for value in stage_ent.getValues('cn'): - if value == 'dummy': - found = True - assert found - - # check active entry has 'cn=dummy' - active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn']) - assert active_ent.hasAttr('cn') - found = False - for value in stage_ent.getValues('cn'): - if value == 'dummy': - found = True - assert found - - topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN)) - except ldap.CONSTRAINT_VIOLATION: - assert across_subtrees - topology.standalone.delete_s(STAGE_USER_1_DN) - - # cleanup the stuff now - topology.standalone.delete_s(config.dn) - topology.standalone.delete_s(ACTIVE_USER_1_DN) - - -def _config_file(topology, action='save'): - dse_ldif = topology.standalone.confdir + '/dse.ldif' - sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823' - if action == 'save': - shutil.copy(dse_ldif, sav_file) - else: - shutil.copy(sav_file, dse_ldif) - - -def _pattern_errorlog(file, log_pattern): - try: - _pattern_errorlog.last_pos += 1 - except AttributeError: - _pattern_errorlog.last_pos = 0 - - found = None - log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos) - file.seek(_pattern_errorlog.last_pos) - - # Use a while true iteration because 'for line in file: hit a - # python bug that break file.tell() - while True: - line = file.readline() - log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) - found = log_pattern.search(line) - if ((line == '') or (found)): - break - - log.debug("_pattern_errorlog: end at offset %d" % file.tell()) - _pattern_errorlog.last_pos = file.tell() - return found - - -def test_ticket47823_init(topology): - """ - - """ - - # Enabled the plugins - topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) - topology.standalone.restart(timeout=120) - - topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(), - 'cn': PROVISIONING_CN}))) - topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), - 'cn': ACTIVE_CN}))) - topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), - 'cn': STAGE_CN}))) - topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), - 'cn': DELETE_CN}))) - topology.standalone.errorlog_file = open(topology.standalone.errlog, "r") - - topology.standalone.stop(timeout=120) - time.sleep(1) - topology.standalone.start(timeout=120) - time.sleep(3) - - -def test_ticket47823_one_container_add(topology): - ''' - Check uniqueness in a single container - Add and 
entry with a given 'cn', then check we can not add an entry with the same 'cn' value - - ''' - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") - - _active_container_add(topology, type_config='old') - - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") - - _active_container_add(topology, type_config='new') - - -def test_ticket47823_one_container_mod(topology): - ''' - Check uniqueness in a single container - Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value - - ''' - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD)") - - _active_container_mod(topology, type_config='old') - - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD)") - - _active_container_mod(topology, type_config='new') - - -def test_ticket47823_one_container_modrdn(topology): - ''' - Check uniqueness in a single container - Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value - - ''' - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") - - _active_container_modrdn(topology, type_config='old') - - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") - - _active_container_modrdn(topology, type_config='new') - - -def test_ticket47823_multi_containers_add(topology): - ''' - Check uniqueness in a several containers - Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value - - ''' - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") - - _active_stage_containers_add(topology, type_config='old', across_subtrees=False) - - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") - - _active_stage_containers_add(topology, type_config='new', across_subtrees=False) - - -def test_ticket47823_multi_containers_mod(topology): - ''' - Check uniqueness in a several containers - Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container - - ''' - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container") - - topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') - _active_stage_containers_mod(topology, type_config='old', across_subtrees=False) - - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container") - - topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') - _active_stage_containers_mod(topology, type_config='new', across_subtrees=False) - - -def test_ticket47823_multi_containers_modrdn(topology): - ''' - Check uniqueness in a several containers - Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container - - ''' - _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers") - - topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') - _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False) - - topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') - 
_active_stage_containers_modrdn(topology, type_config='old') - - -def test_ticket47823_across_multi_containers_add(topology): - ''' - Check uniqueness across several containers, uniquely with the new configuration - Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value - - ''' - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers") - - _active_stage_containers_add(topology, type_config='old', across_subtrees=True) - - -def test_ticket47823_across_multi_containers_mod(topology): - ''' - Check uniqueness across several containers, uniquely with the new configuration - Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value - - ''' - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers") - - _active_stage_containers_mod(topology, type_config='old', across_subtrees=True) - - -def test_ticket47823_across_multi_containers_modrdn(topology): - ''' - Check uniqueness across several containers, uniquely with the new configuration - Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value - - ''' - _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers") - - _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=True) - - -def test_ticket47823_invalid_config_1(topology): - ''' - Check that an invalid config is detected. No uniqueness enforced - Using old config: arg0 is missing - ''' - _header(topology, "Invalid config (old): arg0 is missing") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - - del config.data['nsslapd-pluginarg0'] - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config fail: unable to parse old style") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_invalid_config_2(topology): - ''' - Check that an invalid config is detected. 
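The helpers above drive every scenario through both configuration styles of the uniqueness plugin; side by side they reduce to the attribute sets below. Subtree values assume the default dc=example,dc=com suffix used by these tests, and the across-subtrees switch exists only in the named style (per the test headers above):

    # Legacy positional style, as built by _build_config(type_config='old'):
    OLD_STYLE = {
        "nsslapd-pluginarg0": "cn",                              # unique attribute
        "nsslapd-pluginarg1": "cn=accounts,dc=example,dc=com",   # first subtree
        "nsslapd-pluginarg2": "cn=staged users,cn=provisioning,dc=example,dc=com",  # optional second subtree
    }

    # Named style, as built by _build_config(type_config='new'):
    NEW_STYLE = {
        "uniqueness-attribute-name": "cn",
        "uniqueness-subtrees": ["cn=accounts,dc=example,dc=com",
                                "cn=staged users,cn=provisioning,dc=example,dc=com"],
        "uniqueness-across-all-subtrees": "on",   # enforce across the listed subtrees
    }
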
No uniqueness enforced - Using old config: arg1 is missing - ''' - _header(topology, "Invalid config (old): arg1 is missing") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - - del config.data['nsslapd-pluginarg1'] - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config info: No valid subtree is defined") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_invalid_config_3(topology): - ''' - Check that an invalid config is detected. No uniqueness enforced - Using old config: arg0 is missing - ''' - _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - - del config.data['nsslapd-pluginarg0'] - config.data['uniqueness-attribute-name'] = 'cn' - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config fail: unable to parse old style") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_invalid_config_4(topology): - ''' - Check that an invalid config is detected. 
No uniqueness enforced - Using old config: arg1 is missing - ''' - _header(topology, "Invalid config (old): arg1 is missing but new config exist") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - - del config.data['nsslapd-pluginarg1'] - config.data['uniqueness-subtrees'] = ACTIVE_DN - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config info: No valid subtree is defined") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_invalid_config_5(topology): - ''' - Check that an invalid config is detected. No uniqueness enforced - Using new config: uniqueness-attribute-name is missing - ''' - _header(topology, "Invalid config (new): uniqueness-attribute-name is missing") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) - - del config.data['uniqueness-attribute-name'] - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config info: attribute name not defined") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_invalid_config_6(topology): - ''' - Check that an invalid config is detected. 
No uniqueness enforced - Using new config: uniqueness-subtrees is missing - ''' - _header(topology, "Invalid config (new): uniqueness-subtrees is missing") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) - - del config.data['uniqueness-subtrees'] - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config info: objectclass for subtree entries is not defined") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_invalid_config_7(topology): - ''' - Check that an invalid config is detected. No uniqueness enforced - Using new config: uniqueness-subtrees is missing - ''' - _header(topology, "Invalid config (new): uniqueness-subtrees are invalid") - - _config_file(topology, action='save') - - # create an invalid config without arg0 - config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False) - - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')]) - # replace 'cn' uniqueness entry - try: - topology.standalone.delete_s(config.dn) - - except ldap.NO_SUCH_OBJECT: - pass - topology.standalone.add_s(config) - - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - - # Check the server did not restart - try: - topology.standalone.restart(timeout=5) - ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - if ent: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert not ent - except ldap.SERVER_DOWN: - pass - - # Check the expected error message - regex = re.compile("Config info: No valid subtree is defined") - res = _pattern_errorlog(topology.standalone.errorlog_file, regex) - if not res: - # be sure to restore a valid config before assert - _config_file(topology, action='restore') - assert res - - # Check we can restart the server - _config_file(topology, action='restore') - topology.standalone.start(timeout=5) - try: - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - except ldap.NO_SUCH_OBJECT: - pass - - -def test_ticket47823_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - 
run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47823_init(topo) - - # run old/new config style that makes uniqueness checking on one subtree - test_ticket47823_one_container_add(topo) - test_ticket47823_one_container_mod(topo) - test_ticket47823_one_container_modrdn(topo) - - # run old config style that makes uniqueness checking on each defined subtrees - test_ticket47823_multi_containers_add(topo) - test_ticket47823_multi_containers_mod(topo) - test_ticket47823_multi_containers_modrdn(topo) - test_ticket47823_across_multi_containers_add(topo) - test_ticket47823_across_multi_containers_mod(topo) - test_ticket47823_across_multi_containers_modrdn(topo) - - test_ticket47823_invalid_config_1(topo) - test_ticket47823_invalid_config_2(topo) - test_ticket47823_invalid_config_3(topo) - test_ticket47823_invalid_config_4(topo) - test_ticket47823_invalid_config_5(topo) - test_ticket47823_invalid_config_6(topo) - test_ticket47823_invalid_config_7(topo) - - test_ticket47823_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47824_test.py b/dirsrvtests/tickets/ticket47824_test.py deleted file mode 100644 index ce1caa9..0000000 --- a/dirsrvtests/tickets/ticket47824_test.py +++ /dev/null @@ -1,265 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl - -log = logging.getLogger(__name__) - -installation_prefix = None - -MYSUFFIX = 'o=ticket47824.org' -MYSUFFIXBE = 'ticket47824' -SUBSUFFIX0 = 'ou=OU0,o=ticket47824.org' -SUBSUFFIX0BE = 'OU0' -SUBSUFFIX1 = 'ou=OU1,o=ticket47824.org' -SUBSUFFIX1BE = 'OU1' -SUBSUFFIX2 = 'ou=OU2,o=ticket47824.org' -SUBSUFFIX2BE = 'OU2' - -_MYLDIF = 'ticket47824.ldif' -_SUBLDIF0TMP = 'ticket47824_0.tmp' -_SUBLDIF0 = 'ticket47824_0.ldif' -_SUBLDIF1TMP = 'ticket47824_1.tmp' -_SUBLDIF1 = 'ticket47824_1.ldif' -_SUBLDIF2TMP = 'ticket47824_2.tmp' -_SUBLDIF2 = 'ticket47824_2.ldif' - -SEARCHFILTER = '(objectclass=*)' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47824_run(topology): - """ - Add 3 sub suffixes under the primary suffix - Import 16 entries each - Search with Simple Paged Results Control from the primary suffix (pagesize = 4) - If all of them are returned, the bug is verified - """ - log.info('Testing Ticket 47824 - paged results control is not working in some cases when we have a subsuffix') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47824.org ######################\n") - - topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) - topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) - - topology.standalone.log.info("\n\n######################### SETUP SUB SUFFIX ou=OU0 ######################\n") - - topology.standalone.backend.create(SUBSUFFIX0, {BACKEND_NAME: SUBSUFFIX0BE}) - topology.standalone.mappingtree.create(SUBSUFFIX0, bename=SUBSUFFIX0BE, parent=MYSUFFIX) - - topology.standalone.log.info("\n\n######################### SETUP SUB SUFFIX ou=OU1 ######################\n") - - topology.standalone.backend.create(SUBSUFFIX1, {BACKEND_NAME: SUBSUFFIX1BE}) - topology.standalone.mappingtree.create(SUBSUFFIX1, bename=SUBSUFFIX1BE, parent=MYSUFFIX) - - topology.standalone.log.info("\n\n######################### SETUP SUB SUFFIX ou=OU2 ######################\n") - - topology.standalone.backend.create(SUBSUFFIX2, {BACKEND_NAME: SUBSUFFIX2BE}) - topology.standalone.mappingtree.create(SUBSUFFIX2, bename=SUBSUFFIX2BE, parent=MYSUFFIX) - - topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") - - # get tmp dir - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - if mytmp is None: - mytmp = "/tmp" - - MYLDIF = '%s%s' % (mytmp, _MYLDIF) - SUBLDIF0TMP = '%s%s' % (mytmp, _SUBLDIF0TMP) - SUBLDIF0 = '%s%s' % (mytmp, _SUBLDIF0) - SUBLDIF1TMP = '%s%s' % (mytmp, _SUBLDIF1TMP) - SUBLDIF1 = '%s%s' % (mytmp, _SUBLDIF1) - SUBLDIF2TMP = '%s%s' % (mytmp, _SUBLDIF2TMP) - SUBLDIF2 = '%s%s' % (mytmp, _SUBLDIF2) - - os.system('ls %s' % MYLDIF) - os.system('ls %s' % SUBLDIF0TMP) - os.system('ls %s' % SUBLDIF1TMP) - os.system('ls %s' % SUBLDIF2TMP) - os.system('rm -f %s' % MYLDIF) - os.system('rm -f %s' % SUBLDIF0TMP) - os.system('rm -f %s' % SUBLDIF1TMP) - os.system('rm -f %s' % SUBLDIF2TMP) - if hasattr(topology.standalone, 'prefix'): - prefix = topology.standalone.prefix - else: - prefix = None - dbgen_prog = prefix + '/bin/dbgen.pl' - topology.standalone.log.info('dbgen: %s' % dbgen_prog) - 
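    # Sketch (assumption: the LDIF files named above have been generated by the
    # dbgen.pl / sed steps that follow): the 'egrep dn: ... | wc -l' pipeline run
    # through os.popen further down could equally be computed in pure Python.
    def count_dns(*ldif_paths):
        """Count 'dn:' lines across the given LDIF files."""
        total = 0
        for path in ldif_paths:
            with open(path) as ldif:
                total += sum(1 for line in ldif if line.startswith('dn:'))
        return total
    # e.g. dnnum = count_dns(MYLDIF, SUBLDIF0, SUBLDIF1, SUBLDIF2)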
os.system('%s -s %s -o %s -n 10' % (dbgen_prog, MYSUFFIX, MYLDIF)) - os.system('%s -s %s -o %s -n 10' % (dbgen_prog, SUBSUFFIX0, SUBLDIF0TMP)) - os.system('%s -s %s -o %s -n 10' % (dbgen_prog, SUBSUFFIX1, SUBLDIF1TMP)) - os.system('%s -s %s -o %s -n 10' % (dbgen_prog, SUBSUFFIX2, SUBLDIF2TMP)) - - os.system('cat %s | sed -e "s/\/objectClass: organizationalUnit/" | sed -e "/^o:.*/d" > %s' % (SUBLDIF0TMP, SUBLDIF0)) - os.system('cat %s | sed -e "s/\/objectClass: organizationalUnit/" | sed -e "/^o:.*/d" > %s' % (SUBLDIF1TMP, SUBLDIF1)) - os.system('cat %s | sed -e "s/\/objectClass: organizationalUnit/" | sed -e "/^o:.*/d" > %s' % (SUBLDIF2TMP, SUBLDIF2)) - - cmdline = 'egrep dn: %s %s %s %s | wc -l' % (MYLDIF, SUBLDIF0, SUBLDIF1, SUBLDIF2) - p = os.popen(cmdline, "r") - dnnumstr = p.readline() - dnnum = int(dnnumstr) - topology.standalone.log.info("We have %d entries.\n", dnnum) - - topology.standalone.log.info("\n\n######################### Import Test data ######################\n") - - args = {TASK_WAIT: True} - importTask = Tasks(topology.standalone) - importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) - importTask.importLDIF(SUBSUFFIX0, SUBSUFFIX0BE, SUBLDIF0, args) - importTask.importLDIF(SUBSUFFIX1, SUBSUFFIX1BE, SUBLDIF1, args) - importTask.importLDIF(SUBSUFFIX2, SUBSUFFIX2BE, SUBLDIF2, args) - - topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") - topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) - topology.standalone.log.info("Returned %d entries.\n", len(entries)) - - #print entries - - assert dnnum == len(entries) - - topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) - - topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL ######################\n") - - page_size = 4 - req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') - - known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType: SimplePagedResultsControl, - } - - topology.standalone.log.info("Calling search_ext...") - msgid = topology.standalone.search_ext(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER, None, serverctrls=[req_ctrl]) - - pageddncnt = 0 - pages = 0 - while True: - pages += 1 - - topology.standalone.log.info("Getting page %d" % pages) - rtype, rdata, rmsgid, serverctrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) - topology.standalone.log.info("%d results" % len(rdata)) - pageddncnt += len(rdata) - - topology.standalone.log.info("Results:") - for dn, attrs in rdata: - topology.standalone.log.info("dn: %s" % dn) - - pctrls = [ - c for c in serverctrls if c.controlType == SimplePagedResultsControl.controlType - ] - if not pctrls: - topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') - break - - if pctrls[0].cookie: - req_ctrl.cookie = pctrls[0].cookie - topology.standalone.log.info("cookie: %s" % req_ctrl.cookie) - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - None, - serverctrls=[req_ctrl]) - else: - topology.standalone.log.info("No cookie") - break - - topology.standalone.log.info("Paged result search returned %d entries.\n", pageddncnt) - - assert dnnum == len(entries) - topology.standalone.log.info("ticket47824 was successfully verified.") - - -def test_ticket47824_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47824_run(topo) - - test_ticket47824_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47828_test.py b/dirsrvtests/tickets/ticket47828_test.py deleted file mode 100644 index 3962a0a..0000000 --- a/dirsrvtests/tickets/ticket47828_test.py +++ /dev/null @@ -1,728 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import socket -import pytest -import shutil -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY -ACCT_POLICY_DN = 'cn=Account Inactivation Pplicy,%s' % SUFFIX -INACTIVITY_LIMIT = '9' -SEARCHFILTER = '(objectclass=*)' - -DUMMY_CONTAINER = 'cn=dummy container,%s' % SUFFIX -PROVISIONING = 'cn=provisioning,%s' % SUFFIX -ACTIVE_USER1_CN = 'active user1' -ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX) -STAGED_USER1_CN = 'staged user1' -STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING) -DUMMY_USER1_CN = 'dummy user1' -DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER) - -ALLOCATED_ATTR = 'employeeNumber' - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - try: - standalone.start(timeout=10) - except ldap.SERVER_DOWN: - pass - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) - - # - # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology - return TopologyStandalone(standalone) - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - -def test_ticket47828_init(topology): - """ - Enable DNA - """ - topology.standalone.plugins.enable(name=PLUGIN_DNA) - - topology.standalone.add_s(Entry((PROVISIONING,{'objectclass': "top nscontainer".split(), - 'cn': 'provisioning'}))) - topology.standalone.add_s(Entry((DUMMY_CONTAINER,{'objectclass': "top nscontainer".split(), - 'cn': 'dummy container'}))) - - dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) - topology.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(), - 'cn': 'excluded scope', - 'dnaType': ALLOCATED_ATTR, - 'dnaNextValue': str(1000), - 'dnaMaxValue': str(2000), - 'dnaMagicRegen': str(-1), - 'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))', - 'dnaScope': SUFFIX}))) - topology.standalone.restart(timeout=10) - - - -def test_ticket47828_run_0(topology): - """ - NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_1(topology): - """ - NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_2(topology): - """ - NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = 
topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_3(topology): - """ - NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_4(topology): - ''' - Exclude the provisioning container - ''' - _header(topology, 'Exclude the provisioning container') - - dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) - mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)] - topology.standalone.modify_s(dn_config, mod) - -def test_ticket47828_run_5(topology): - """ - Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_6(topology): - """ - Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_7(topology): - """ - Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set - """ - _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = 
topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(-1) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_8(topology): - """ - Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_9(topology): - """ - Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_run_10(topology): - """ - Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_run_11(topology): - ''' - Exclude (in addition) the dummy container - ''' - _header(topology, 'Exclude (in addition) the dummy container') - - dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) - mod = [(ldap.MOD_ADD, 'dnaExcludeScope', DUMMY_CONTAINER)] - topology.standalone.modify_s(dn_config, mod) - -def test_ticket47828_run_12(topology): - """ - Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: 
str(-1)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_13(topology): - """ - Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_14(topology): - """ - Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set - """ - _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(-1) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_15(topology): - """ - Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_16(topology): - """ - Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set - """ - _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(-1) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, 
ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_run_17(topology): - """ - Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - - -def test_ticket47828_run_18(topology): - ''' - Exclude PROVISIONING and a wrong container - ''' - _header(topology, 'Exclude PROVISIONING and a wrong container') - - dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) - mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)] - topology.standalone.modify_s(dn_config, mod) - try: - mod = [(ldap.MOD_ADD, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)] - topology.standalone.modify_s(dn_config, mod) - raise ValueError("invalid dnaExcludeScope value (not a DN)") - except ldap.INVALID_SYNTAX: - pass - -def test_ticket47828_run_19(topology): - """ - Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_20(topology): - """ - Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_21(topology): - """ - Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set - """ - _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson 
inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(-1) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_22(topology): - """ - Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_23(topology): - """ - Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_run_24(topology): - """ - Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_run_25(topology): - ''' - Exclude a wrong container - ''' - _header(topology, 'Exclude a wrong container') - - dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) - - try: - mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)] - topology.standalone.modify_s(dn_config, mod) - raise ValueError("invalid dnaExcludeScope value (not a DN)") - except ldap.INVALID_SYNTAX: - pass - -def test_ticket47828_run_26(topology): - """ - Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 
'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_27(topology): - """ - Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': ACTIVE_USER1_CN, - 'sn': ACTIVE_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(ACTIVE_USER1_DN) - -def test_ticket47828_run_28(topology): - """ - Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set - """ - _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(-1) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_29(topology): - """ - Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': STAGED_USER1_CN, - 'sn': STAGED_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(STAGED_USER1_DN) - -def test_ticket47828_run_30(topology): - """ - Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set - """ - _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(-1)}))) - ent 
= topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) != str(-1) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_run_31(topology): - """ - Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) - """ - _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') - - topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'cn': DUMMY_USER1_CN, - 'sn': DUMMY_USER1_CN, - ALLOCATED_ATTR: str(20)}))) - ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent.hasAttr(ALLOCATED_ATTR) - assert ent.getValue(ALLOCATED_ATTR) == str(20) - topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR))) - topology.standalone.delete_s(DUMMY_USER1_DN) - -def test_ticket47828_final(topology): - topology.standalone.plugins.disable(name=PLUGIN_DNA) - topology.standalone.stop(timeout=10) - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47828_init(topo) - - test_ticket47828_run_0(topo) - test_ticket47828_run_1(topo) - test_ticket47828_run_2(topo) - test_ticket47828_run_3(topo) - test_ticket47828_run_4(topo) - test_ticket47828_run_5(topo) - test_ticket47828_run_6(topo) - test_ticket47828_run_7(topo) - test_ticket47828_run_8(topo) - test_ticket47828_run_9(topo) - test_ticket47828_run_10(topo) - test_ticket47828_run_11(topo) - test_ticket47828_run_12(topo) - test_ticket47828_run_13(topo) - test_ticket47828_run_14(topo) - test_ticket47828_run_15(topo) - test_ticket47828_run_16(topo) - test_ticket47828_run_17(topo) - test_ticket47828_run_18(topo) - test_ticket47828_run_19(topo) - test_ticket47828_run_20(topo) - test_ticket47828_run_21(topo) - test_ticket47828_run_22(topo) - test_ticket47828_run_23(topo) - test_ticket47828_run_24(topo) - test_ticket47828_run_25(topo) - test_ticket47828_run_26(topo) - test_ticket47828_run_27(topo) - test_ticket47828_run_28(topo) - test_ticket47828_run_29(topo) - test_ticket47828_run_30(topo) - test_ticket47828_run_31(topo) - - test_ticket47828_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47829_test.py b/dirsrvtests/tickets/ticket47829_test.py deleted file mode 100644 index 2acebf9..0000000 --- a/dirsrvtests/tickets/ticket47829_test.py +++ /dev/null @@ -1,656 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - - -SCOPE_IN_CN = 'in' -SCOPE_OUT_CN = 'out' -SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) -SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) - -PROVISIONING_CN = "provisioning" -PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) - -ACTIVE_CN = "accounts" -STAGE_CN = "staged users" -DELETE_CN = "deleted users" -ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) -STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) -DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) - -STAGE_USER_CN = "stage guy" -STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) - -ACTIVE_USER_CN = "active guy" -ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) - -OUT_USER_CN = "out guy" -OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) - -STAGE_GROUP_CN = "stage group" -STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) - -ACTIVE_GROUP_CN = "active group" -ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) - -OUT_GROUP_CN = "out group" -OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) - -INDIRECT_ACTIVE_GROUP_CN = "indirect active group" -INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - - -def _add_user(topology, type='active'): - if type == 'active': - topology.standalone.add_s(Entry((ACTIVE_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': ACTIVE_USER_CN, - 'cn': ACTIVE_USER_CN}))) - elif type == 'stage': - topology.standalone.add_s(Entry((STAGE_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': STAGE_USER_CN, - 'cn': STAGE_USER_CN}))) - else: - topology.standalone.add_s(Entry((OUT_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': OUT_USER_CN, - 'cn': OUT_USER_CN}))) - - -def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True): - assert(topology) - assert(user_dn) - assert(group_dn) - ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) - found = False - if ent.hasAttr('memberof'): - - for val in ent.getValues('memberof'): - topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) - if val == group_dn: - found = True - break - - if find_result: - assert(found) - else: - assert(not found) - - -def _find_member(topology, user_dn=None, group_dn=None, find_result=True): - assert(topology) - assert(user_dn) - assert(group_dn) - ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) - found = False - if ent.hasAttr('member'): - - for val in ent.getValues('member'): - topology.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val)) - if val == user_dn: - found = True - break - - if find_result: - assert(found) - else: - assert(not found) - - -def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): - assert topology is not None - assert entry_dn is not None - assert new_rdn is not None - - topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) - try: - if new_superior: - topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) - else: - topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old) - except ldap.NO_SUCH_ATTRIBUTE: - topology.standalone.log.info("accepted failure due to 47833: modrdn reports error.. 
but succeeds") - attempt = 0 - if new_superior: - dn = "%s,%s" % (new_rdn, new_superior) - base = new_superior - else: - base = ','.join(entry_dn.split(",")[1:]) - dn = "%s, %s" % (new_rdn, base) - myfilter = entry_dn.split(',')[0] - - while attempt < 10: - try: - ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) - break - except ldap.NO_SUCH_OBJECT: - topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") - attempt += 1 - time.sleep(1) - if attempt == 10: - ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) - ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) - - -def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None): - assert(topology) - assert(user_dn) - assert(group_dn) - if action == ldap.MOD_ADD: - txt = 'add' - elif action == ldap.MOD_DELETE: - txt = 'delete' - else: - txt = 'replace' - topology.standalone.log.info('\n%s entry %s' % (txt, user_dn)) - topology.standalone.log.info('to group %s' % group_dn) - - topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)]) - time.sleep(1) - _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result) - - -def test_ticket47829_init(topology): - topology.standalone.add_s(Entry((SCOPE_IN_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': SCOPE_IN_DN}))) - topology.standalone.add_s(Entry((SCOPE_OUT_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': SCOPE_OUT_DN}))) - topology.standalone.add_s(Entry((PROVISIONING_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': PROVISIONING_CN}))) - topology.standalone.add_s(Entry((ACTIVE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': ACTIVE_CN}))) - topology.standalone.add_s(Entry((STAGE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': STAGE_DN}))) - topology.standalone.add_s(Entry((DELETE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': DELETE_CN}))) - - # add groups - topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, { - 'objectclass': "top groupOfNames inetuser".split(), - 'cn': ACTIVE_GROUP_CN}))) - topology.standalone.add_s(Entry((STAGE_GROUP_DN, { - 'objectclass': "top groupOfNames inetuser".split(), - 'cn': STAGE_GROUP_CN}))) - topology.standalone.add_s(Entry((OUT_GROUP_DN, { - 'objectclass': "top groupOfNames inetuser".split(), - 'cn': OUT_GROUP_CN}))) - topology.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, { - 'objectclass': "top groupOfNames".split(), - 'cn': INDIRECT_ACTIVE_GROUP_CN}))) - - # add users - _add_user(topology, 'active') - _add_user(topology, 'stage') - _add_user(topology, 'out') - - # enable memberof of with scope IN except provisioning - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) - topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)]) - topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)]) - - # enable RI with scope IN except provisioning - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN) - topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)]) - topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)]) - topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)]) - - topology.standalone.restart(timeout=10) - - 
-def test_ticket47829_mod_active_user_1(topology): - _header(topology, 'MOD: add an active user to an active group') - - # add active user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # remove active user to active group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_active_user_2(topology): - _header(topology, 'MOD: add an Active user to a Stage group') - - # add active user to stage group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) - - # remove active user to stage group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_active_user_3(topology): - _header(topology, 'MOD: add an Active user to a out of scope group') - - # add active user to out of scope group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) - - # remove active user to out of scope group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_stage_user_1(topology): - _header(topology, 'MOD: add an Stage user to a Active group') - - # add stage user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # remove stage user to active group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_stage_user_2(topology): - _header(topology, 'MOD: add an Stage user to a Stage group') - - # add stage user to stage group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) - - # remove stage user to stage group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_stage_user_3(topology): - _header(topology, 'MOD: add an Stage user to a out of scope group') - - # add stage user to an out of scope group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) - - # remove stage user to out of scope group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_out_user_1(topology): - _header(topology, 'MOD: add an out of scope user to an active group') - - # add out of scope user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # 
remove out of scope user to active group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_out_user_2(topology): - _header(topology, 'MOD: add an out of scope user to a Stage group') - - # add out of scope user to stage group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) - - # remove out of scope user to stage group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - -def test_ticket47829_mod_out_user_3(topology): - _header(topology, 'MOD: add an out of scope user to an out of scope group') - - # add out of scope user to stage group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - _find_member(topology, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) - - # remove out of scope user to stage group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_active_user_modrdn_active_user_1(topology): - _header(topology, 'add an Active user to a Active group. Then move Active user to Active') - - # add Active user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Active entry to active, expect 'member' and 'memberof' - _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Active entry to active, expect 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - - # remove active user to active group - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology): - _header(topology, 'add an Active user to a Active group. 
Then move Active user to Stage') - - # add Active user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Active entry to stage, expect no 'member' and 'memberof' - _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - - # move the Active entry to Stage, expect 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_active_user_modrdn_out_user_1(topology): - _header(topology, 'add an Active user to a Active group. Then move Active user to out of scope') - - # add Active user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Active entry to out of scope, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - - # move the Active entry to out of scope, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_modrdn_1(topology): - _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Active') - - # add Stage user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Stage entry to active, expect 'member' and 'memberof' - _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Active entry to Stage, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology): - _header(topology, 'add an Stage user to a Active group. Then move Stage user to Active') - - stage_user_dn = STAGE_USER_DN - stage_user_rdn = "cn=%s" % STAGE_USER_CN - active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) - - # add Stage user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Stage entry to Actve, expect 'member' and 'memberof' - _modrdn_entry(topology, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Active entry to Stage, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN) - _find_memberof(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology): - _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Stage') - - _header(topology, 'Return because it requires a fix for 47833') - return - - old_stage_user_dn = STAGE_USER_DN - old_stage_user_rdn = "cn=%s" % STAGE_USER_CN - new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN - new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) - - # add Stage user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Stage entry to Stage, expect no 'member' and 'memberof' - _modrdn_entry(topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) - _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - - # move the Stage entry to Stage, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) - _find_memberof(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_indirect_active_group_1(topology): - _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1') - - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - - # add an active user to G1. Checks that user is memberof G1 - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - - # remove G1 from G0 - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # remove active user from G1 - _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_indirect_active_group_2(topology): - _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage') - - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - - # add an active user to G1. 
Checks that user is memberof G1 - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - - # remove G1 from G0 - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move active user to stage - _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) - - # stage user is no long member of active group and indirect active group - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - - # active group and indirect active group do no longer have stage user as member - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - - # return back the entry to active. It remains not member - _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_indirect_active_group_3(topology): - _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope') - - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - - # add an active user to G1. 
Checks that user is memberof G1 - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - - # remove G1 from G0 - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move active user to out of the scope - _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN) - - # stage user is no long member of active group and indirect active group - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - - # active group and indirect active group do no longer have stage user as member - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - - # return back the entry to active. It remains not member - _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_indirect_active_group_4(topology): - _header(topology, 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. 
Then move it back') - - topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - - # add stage user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - # move stage user to active - _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) - renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) - _find_member(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move back active to stage - _modrdn_entry(topology, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47829_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47829_init(topo) - - test_ticket47829_mod_active_user_1(topo) - test_ticket47829_mod_active_user_2(topo) - test_ticket47829_mod_active_user_3(topo) - test_ticket47829_mod_stage_user_1(topo) - test_ticket47829_mod_stage_user_2(topo) - test_ticket47829_mod_stage_user_3(topo) - test_ticket47829_mod_out_user_1(topo) - test_ticket47829_mod_out_user_2(topo) - test_ticket47829_mod_out_user_3(topo) - - test_ticket47829_mod_active_user_modrdn_active_user_1(topo) - test_ticket47829_mod_active_user_modrdn_stage_user_1(topo) - test_ticket47829_mod_active_user_modrdn_out_user_1(topo) - - test_ticket47829_mod_stage_user_modrdn_active_user_1(topo) - test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) - - test_ticket47829_indirect_active_group_1(topo) - test_ticket47829_indirect_active_group_2(topo) - test_ticket47829_indirect_active_group_3(topo) - test_ticket47829_indirect_active_group_4(topo) - - test_ticket47829_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47833_test.py b/dirsrvtests/tickets/ticket47833_test.py deleted file mode 100644 index f1fb634..0000000 --- a/dirsrvtests/tickets/ticket47833_test.py +++ /dev/null @@ -1,274 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -SCOPE_IN_CN = 'in' -SCOPE_OUT_CN = 'out' -SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) -SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) - -PROVISIONING_CN = "provisioning" -PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) - -ACTIVE_CN = "accounts" -STAGE_CN = "staged users" -DELETE_CN = "deleted users" -ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) -STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) -DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) - -STAGE_USER_CN = "stage guy" -STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) - -ACTIVE_USER_CN = "active guy" -ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) - -OUT_USER_CN = "out guy" -OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) - -STAGE_GROUP_CN = "stage group" -STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) - -ACTIVE_GROUP_CN = "active group" -ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) - -OUT_GROUP_CN = "out group" -OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) - - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
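    # The block below follows the usual lib389 setup pattern seen throughout these
    # tests: fill args_instance (host, port, server id, creation suffix), allocate()
    # a DirSrv object from it, delete any instance left over from a previous run,
    # then create() and open() a fresh standalone instance for the module.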
- standalone = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - -def _add_user(topology, type='active'): - if type == 'active': - topology.standalone.add_s(Entry((ACTIVE_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': ACTIVE_USER_CN, - 'cn': ACTIVE_USER_CN}))) - elif type == 'stage': - topology.standalone.add_s(Entry((STAGE_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': STAGE_USER_CN, - 'cn': STAGE_USER_CN}))) - else: - topology.standalone.add_s(Entry((OUT_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': OUT_USER_CN, - 'cn': OUT_USER_CN}))) - -def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True): - assert(topology) - assert(user_dn) - assert(group_dn) - ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) - found = False - if ent.hasAttr('memberof'): - - for val in ent.getValues('memberof'): - topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) - if val == group_dn: - found = True - break - - if find_result: - assert(found) - else: - assert(not found) - -def _find_member(topology, user_dn=None, group_dn=None, find_result=True): - assert(topology) - assert(user_dn) - assert(group_dn) - ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) - found = False - if ent.hasAttr('member'): - - for val in ent.getValues('member'): - topology.standalone.log.info("!!!!!!! 
%s: member ->%s" % (group_dn, val)) - if val == user_dn: - found = True - break - - if find_result: - assert(found) - else: - assert(not found) - -def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): - assert topology != None - assert entry_dn != None - assert new_rdn != None - - - topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) - if new_superior: - topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) - else: - topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old) - -def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None): - assert(topology) - assert(user_dn) - assert(group_dn) - if action == ldap.MOD_ADD: - txt = 'add' - elif action == ldap.MOD_DELETE: - txt = 'delete' - else: - txt = 'replace' - topology.standalone.log.info('\n%s entry %s' % (txt, user_dn)) - topology.standalone.log.info('to group %s' % group_dn) - - topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)]) - time.sleep(1) - _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result) - - - - -def test_ticket47829_init(topology): - topology.standalone.add_s(Entry((SCOPE_IN_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': SCOPE_IN_DN}))) - topology.standalone.add_s(Entry((SCOPE_OUT_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': SCOPE_OUT_DN}))) - topology.standalone.add_s(Entry((PROVISIONING_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': PROVISIONING_CN}))) - topology.standalone.add_s(Entry((ACTIVE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': ACTIVE_CN}))) - topology.standalone.add_s(Entry((STAGE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': STAGE_DN}))) - topology.standalone.add_s(Entry((DELETE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': DELETE_CN}))) - - # add groups - topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, { - 'objectclass': "top groupOfNames".split(), - 'cn': ACTIVE_GROUP_CN}))) - topology.standalone.add_s(Entry((STAGE_GROUP_DN, { - 'objectclass': "top groupOfNames".split(), - 'cn': STAGE_GROUP_CN}))) - topology.standalone.add_s(Entry((OUT_GROUP_DN, { - 'objectclass': "top groupOfNames".split(), - 'cn': OUT_GROUP_CN}))) - - # add users - _add_user(topology, 'active') - _add_user(topology, 'stage') - _add_user(topology, 'out') - - - - # enable memberof of with scope account - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) - topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ACTIVE_DN)]) - - - - topology.standalone.restart(timeout=10) - - - - -def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology): - _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Stage') - - old_stage_user_dn = STAGE_USER_DN - old_stage_user_rdn = "cn=%s" % STAGE_USER_CN - new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN - new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) - - # add Stage user to active group - _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - - # move the Stage entry to Stage, expect no 'member' and 'memberof' - _modrdn_entry (topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) - _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - - -def test_ticket47833_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47829_init(topo) - test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) - test_ticket47833_final(topo) - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47838_test.py b/dirsrvtests/tickets/ticket47838_test.py deleted file mode 100644 index 42d25fd..0000000 --- a/dirsrvtests/tickets/ticket47838_test.py +++ /dev/null @@ -1,841 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import shutil -from lib389 import DirSrv, Entry, tools -from lib389 import DirSrvTools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -CONFIG_DN = 'cn=config' -ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN -RSA = 'RSA' -RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) -LDAPSPORT = '10636' -SERVERCERT = 'Server-Cert' -plus_all_ecount = 0 -plus_all_dcount = 0 -plus_all_ecount_noweak = 0 -plus_all_dcount_noweak = 0 - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
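    # open() connects to the freshly created instance and fills in the configuration
    # paths (confdir, errlog, ...) that the test cases below rely on when driving
    # certutil against the NSS database and when grepping the error log for
    # "SSL alert:" lines.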
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - - -def test_ticket47838_init(topology): - """ - Generate self signed cert and import it to the DS cert db. - Enable SSL - """ - _header(topology, 'Testing Ticket 47838 - harden the list of ciphers available by default') - - conf_dir = topology.standalone.confdir - - log.info("\n######################### Checking existing certs ######################\n") - os.system('certutil -L -d %s -n "CA certificate"' % conf_dir) - os.system('certutil -L -d %s -n "%s"' % (conf_dir, SERVERCERT)) - - log.info("\n######################### Create a password file ######################\n") - pwdfile = '%s/pwdfile.txt' % (conf_dir) - opasswd = os.popen("(ps -ef ; w ) | sha1sum | awk '{print $1}'", "r") - passwd = opasswd.readline() - pwdfd = open(pwdfile, "w") - pwdfd.write(passwd) - pwdfd.close() - - log.info("\n######################### Create a noise file ######################\n") - noisefile = '%s/noise.txt' % (conf_dir) - noise = os.popen("(w ; ps -ef ; date ) | sha1sum | awk '{print $1}'", "r") - noisewdfd = open(noisefile, "w") - noisewdfd.write(noise.readline()) - noisewdfd.close() - - log.info("\n######################### Create key3.db and cert8.db database ######################\n") - os.system("ls %s" % pwdfile) - os.system("cat %s" % pwdfile) - os.system('certutil -N -d %s -f %s' % (conf_dir, pwdfile)) - - log.info("\n######################### Creating encryption key for CA ######################\n") - os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile)) - - log.info("\n######################### Creating self-signed CA certificate ######################\n") - os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile)) - - log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n") - cafile = '%s/cacert.asc' % conf_dir - catxt = os.popen('certutil -L -d %s -n "CA certificate" -a' % conf_dir) - cafd = open(cafile, "w") - while True: - line = catxt.readline() - if (line == ''): - break - cafd.write(line) - cafd.close() - - log.info("\n######################### Generate the server certificate ######################\n") - ohostname = os.popen('hostname --fqdn', "r") - myhostname = ohostname.readline() - os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile)) - - log.info("\n######################### create the pin file ######################\n") - pinfile = '%s/pin.txt' % (conf_dir) - pintxt = 'Internal (Software) Token:%s' % passwd - pinfd = open(pinfile, "w") - pinfd.write(pintxt) - pinfd.close() - - log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), - (ldap.MOD_REPLACE, 
'nsTLS1', 'on'), - (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'), - (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'), - (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')]) - - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'), - (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'), - (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)]) - - topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), - 'cn': RSA, - 'nsSSLPersonalitySSL': SERVERCERT, - 'nsSSLToken': 'internal (software)', - 'nsSSLActivation': 'on'}))) - - -def comp_nsSSLEnableCipherCount(topology, ecount): - """ - Check nsSSLEnabledCipher count with ecount - """ - log.info("Checking nsSSLEnabledCiphers...") - msgid = topology.standalone.search_ext(ENCRYPTION_DN, ldap.SCOPE_BASE, 'cn=*', ['nsSSLEnabledCiphers']) - enabledciphercnt = 0 - rtype, rdata, rmsgid = topology.standalone.result2(msgid) - topology.standalone.log.info("%d results" % len(rdata)) - - topology.standalone.log.info("Results:") - for dn, attrs in rdata: - topology.standalone.log.info("dn: %s" % dn) - if 'nsSSLEnabledCiphers' in attrs: - enabledciphercnt = len(attrs['nsSSLEnabledCiphers']) - topology.standalone.log.info("enabledCipherCount: %d" % enabledciphercnt) - assert ecount == enabledciphercnt - - -def test_ticket47838_run_0(topology): - """ - Check nsSSL3Ciphers: +all - All ciphers are enabled except null. - Note: allowWeakCipher: on - """ - _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.restart(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - assert ecount >= 60 - assert dcount <= 7 - global plus_all_ecount - global plus_all_dcount - plus_all_ecount = ecount - plus_all_dcount = dcount - weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers: %d" % wcount) - assert wcount <= 29 - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_1(topology): - """ - Check nsSSL3Ciphers: +all - All ciphers are enabled except null. - Note: default allowWeakCipher (i.e., off) for +all - """ - _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) - # Make sure allowWeakCipher is not set. 
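    # Deleting allowWeakCipher (rather than setting it to "off") lets the server
    # fall back to its default, which for nsSSL3Ciphers "+all" is off, so fewer
    # ciphers should come up enabled here than in run_0 above.  The counts are
    # still taken the same way: restart with nsslapd-errorlog-level 64, count the
    # "SSL alert: ... enabled/disabled" lines in the error log, then cross-check
    # against nsSSLEnabledCiphers on cn=encryption,cn=config.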
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) - - log.info("\n######################### Restarting the server ######################\n") - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_0' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - global plus_all_ecount_noweak - global plus_all_dcount_noweak - plus_all_ecount_noweak = ecount - plus_all_dcount_noweak = dcount - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - assert ecount >= 31 - assert dcount <= 36 - weak = os.popen('egrep "SSL alert:" %s | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers: %d" % wcount) - assert wcount <= 29 - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_2(topology): - """ - Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha - rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_1' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount - global plus_all_dcount - assert ecount == 2 - assert dcount == (plus_all_ecount + plus_all_dcount - ecount) - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_3(topology): - """ - Check nsSSL3Ciphers: -all - All ciphers are disabled. 
- default allowWeakCipher - """ - _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_2' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - global plus_all_ecount - assert ecount == 0 - - disabledmsg = os.popen('egrep "Disabling SSL" %s' % topology.standalone.errlog) - log.info("Disabling SSL message?: %s" % disabledmsg.readline()) - assert disabledmsg != '' - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_4(topology): - """ - Check no nsSSL3Ciphers - Default ciphers are enabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_3' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount - global plus_all_dcount - assert ecount == 12 - assert dcount == (plus_all_ecount + plus_all_dcount - ecount) - weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers in the default setting: %d" % wcount) - assert wcount == 0 - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_5(topology): - """ - Check nsSSL3Ciphers: default - Default ciphers are enabled. 
- default allowWeakCipher - """ - _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_4' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount - global plus_all_dcount - assert ecount == 12 - assert dcount == (plus_all_ecount + plus_all_dcount - ecount) - weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers in the default setting: %d" % wcount) - assert wcount == 0 - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_6(topology): - """ - Check nsSSL3Ciphers: +all,-rsa_rc4_128_md5 - All ciphers are disabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-tls_dhe_rsa_aes_128_gcm_sha with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-tls_dhe_rsa_aes_128_gcm_sha')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_5' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount_noweak - global plus_all_dcount_noweak - log.info("ALL Ecount: %d" % plus_all_ecount_noweak) - log.info("ALL Dcount: %d" % plus_all_dcount_noweak) - assert ecount == (plus_all_ecount_noweak - 1) - assert dcount == (plus_all_dcount_noweak + 1) - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_7(topology): - """ - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 - All ciphers are disabled. 
- default allowWeakCipher - """ - _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_6' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount - global plus_all_dcount - assert ecount == 1 - assert dcount == (plus_all_ecount + plus_all_dcount - ecount) - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_8(topology): - """ - Check nsSSL3Ciphers: default + allowWeakCipher: off - Strong Default ciphers are enabled. - """ - _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'), - (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_7' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount - global plus_all_dcount - assert ecount == 12 - assert dcount == (plus_all_ecount + plus_all_dcount - ecount) - weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers in the default setting: %d" % wcount) - assert wcount == 0 - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_9(topology): - """ - Check no nsSSL3Ciphers - Default ciphers are enabled. 
- allowWeakCipher: on - nsslapd-errorlog-level: 0 - """ - _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), - (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')]) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_8' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - assert ecount == 23 - assert dcount == 0 - weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers in the default setting: %d" % wcount) - assert wcount == 11 - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_10(topology): - """ - Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5, - +TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, - +TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA, - +TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA, - +TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA, - -SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5, - -SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5, - -SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5 - allowWeakCipher: on - nsslapd-errorlog-level: 0 - """ - _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', - '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_9' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog) - disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog) - ecount = int(enabled.readline().rstrip()) - dcount = 
int(disabled.readline().rstrip()) - - log.info("Enabled ciphers: %d" % ecount) - log.info("Disabled ciphers: %d" % dcount) - global plus_all_ecount - global plus_all_dcount - assert ecount == 9 - assert dcount == 0 - weak = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | egrep "WEAK CIPHER" | wc -l' % topology.standalone.errlog) - wcount = int(weak.readline().rstrip()) - log.info("Weak ciphers in the default setting: %d" % wcount) - - topology.standalone.log.info("ticket47838 was successfully verified.") - - comp_nsSSLEnableCipherCount(topology, ecount) - - -def test_ticket47838_run_11(topology): - """ - Check nsSSL3Ciphers: +fortezza - SSL_GetImplementedCiphers does not return this as a secuire cipher suite - """ - _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_10' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "is not available in NSS"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected error message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected error message was not found") - assert False - - comp_nsSSLEnableCipherCount(topology, 0) - - -def test_ticket47928_run_0(topology): - """ - No SSL version config parameters. - Check SSL3 (TLS1.0) is off. - """ - _header(topology, 'Test Case 13 - No SSL version config parameters') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - # add them once and remove them - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), - (ldap.MOD_REPLACE, 'nsTLS1', 'on'), - (ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'), - (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2')]) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3', None), - (ldap.MOD_DELETE, 'nsTLS1', None), - (ldap.MOD_DELETE, 'sslVersionMin', None), - (ldap.MOD_DELETE, 'sslVersionMax', None)]) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_11' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "Default SSL Version settings; Configuring the version range as min: TLS1.1"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - -def test_ticket47928_run_1(topology): - """ - No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax - Check sslVersionMax is ignored. 
- """ - _header(topology, 'Test Case 14 - No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.2'), - (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.1')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_12' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "The min value of NSS version range"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.2, max: TLS1"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - -def test_ticket47928_run_2(topology): - """ - nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2 - Conflict between nsSSL3 and range; nsSSL3 is disabled - """ - _header(topology, 'Test Case 15 - nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'), - (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'), - (ldap.MOD_REPLACE, 'nsSSL3', 'on')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_13' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "Respect the supported range."' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - -def test_ticket47928_run_3(topology): - """ - nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2 - Conflict between nsSSL3/nsTLS1 and range; nsSSL3 is disabled; nsTLS1 is enabled. 
- """ - _header(topology, 'Test Case 16 - nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'sslVersionMin', 'TLS1.1'), - (ldap.MOD_REPLACE, 'sslVersionMax', 'TLS1.2'), - (ldap.MOD_REPLACE, 'nsSSL3', 'on'), - (ldap.MOD_REPLACE, 'nsTLS1', 'off')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_14' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "Found unsecure configuration: nsSSL3: on"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "Respect the configured range."' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - errmsg = os.popen('egrep "SSL Initialization" %s | egrep "Configured SSL version range: min: TLS1.1, max: TLS1"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected message was not found") - assert False - - -def test_ticket47838_run_last(topology): - """ - Check nsSSL3Ciphers: all <== invalid value - All ciphers are disabled. - """ - _header(topology, 'Test Case 17 - Check nsSSL3Ciphers: all, which is invalid') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'all')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.47838_15' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - errmsg = os.popen('egrep "SSL alert:" %s | egrep "invalid ciphers"' % topology.standalone.errlog) - if errmsg != "": - log.info("Expected error message:") - log.info("%s" % errmsg.readline()) - else: - log.info("Expected error message was not found") - assert False - - comp_nsSSLEnableCipherCount(topology, 0) - - topology.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.") - - -def test_ticket47838_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47838_init(topo) - - test_ticket47838_run_0(topo) - test_ticket47838_run_1(topo) - test_ticket47838_run_2(topo) - test_ticket47838_run_3(topo) - test_ticket47838_run_4(topo) - test_ticket47838_run_5(topo) - test_ticket47838_run_6(topo) - test_ticket47838_run_7(topo) - test_ticket47838_run_8(topo) - test_ticket47838_run_9(topo) - test_ticket47838_run_10(topo) - test_ticket47838_run_11(topo) - test_ticket47928_run_0(topo) - test_ticket47928_run_1(topo) - test_ticket47928_run_2(topo) - test_ticket47928_run_3(topo) - - test_ticket47838_run_last(topo) - - test_ticket47838_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47869MMR_test.py b/dirsrvtests/tickets/ticket47869MMR_test.py deleted file mode 100644 index 630cb93..0000000 --- a/dirsrvtests/tickets/ticket47869MMR_test.py +++ /dev/null @@ -1,346 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -ENTRY_NAME = 'test_entry' -MAX_ENTRIES = 10 - -BIND_NAME = 'bind_entry' -BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) -BIND_PW = 'password' - - -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. 
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployment
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master2 on a given deployment
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the master2 instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instances
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Master topology
-    #
-    # First enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Create the replication agreements and initialize master1 -> master2
-
-    properties = {RA_NAME: r'meTo_$host:$port',
-                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Failed to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME: r'meTo_$host:$port',
-                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    # clear the tmp directory
-    master1.clearTmpDir(__file__)
-
-    # Here we have two master instances with replication working
-    return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47869_init(topology):
-    """
-    It adds an entry ('bind_entry') and 10 test entries
-    It sets the anonymous aci
-
-    """
-    # enable acl error logging
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
-
-    # entry used to bind with
-    topology.master1.log.info("Add %s" % BIND_DN)
-    topology.master1.add_s(Entry((BIND_DN, {
-        'objectclass': "top person".split(),
-        'sn': BIND_NAME,
-        'cn': BIND_NAME,
-        'userpassword': BIND_PW})))
-    loop = 0
-    ent = None
-    while loop <= 10:
-        try:
-            ent =
topology.master2.getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # keep anonymous ACI for use 'read-search' aci in SEARCH test - ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" - mod = [(ldap.MOD_REPLACE, 'aci', ACI_ANONYMOUS)] - topology.master1.modify_s(SUFFIX, mod) - topology.master2.modify_s(SUFFIX, mod) - - # add entries - for cpt in range(MAX_ENTRIES): - name = "%s%d" % (ENTRY_NAME, cpt) - mydn = "cn=%s,%s" % (name, SUFFIX) - topology.master1.add_s(Entry((mydn, - {'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = topology.master2.getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - -def test_ticket47869_check(topology): - ''' - On Master 1 and 2: - Bind as Directory Manager. - Search all specifying nscpEntryWsi in the attribute list. - Check nscpEntryWsi is returned. - On Master 1 and 2: - Bind as Bind Entry. - Search all specifying nscpEntryWsi in the attribute list. - Check nscpEntryWsi is not returned. - On Master 1 and 2: - Bind as anonymous. - Search all specifying nscpEntryWsi in the attribute list. - Check nscpEntryWsi is not returned. - ''' - topology.master1.log.info("\n\n######################### CHECK nscpentrywsi ######################\n") - - topology.master1.log.info("##### Master1: Bind as %s #####" % DN_DM) - topology.master1.simple_bind_s(DN_DM, PASSWORD) - - topology.master1.log.info("Master1: Calling search_ext...") - msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) - nscpentrywsicnt = 0 - rtype, rdata, rmsgid = topology.master1.result2(msgid) - topology.master1.log.info("%d results" % len(rdata)) - - topology.master1.log.info("Results:") - for dn, attrs in rdata: - topology.master1.log.info("dn: %s" % dn) - if 'nscpentrywsi' in attrs: - nscpentrywsicnt += 1 - - topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) - - topology.master2.log.info("##### Master2: Bind as %s #####" % DN_DM) - topology.master2.simple_bind_s(DN_DM, PASSWORD) - - topology.master2.log.info("Master2: Calling search_ext...") - msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) - nscpentrywsicnt = 0 - rtype, rdata, rmsgid = topology.master2.result2(msgid) - topology.master2.log.info("%d results" % len(rdata)) - - topology.master2.log.info("Results:") - for dn, attrs in rdata: - topology.master2.log.info("dn: %s" % dn) - if 'nscpentrywsi' in attrs: - nscpentrywsicnt += 1 - - topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) - - # bind as bind_entry - topology.master1.log.info("##### Master1: Bind as %s #####" % BIND_DN) - topology.master1.simple_bind_s(BIND_DN, BIND_PW) - - topology.master1.log.info("Master1: Calling search_ext...") - msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) - nscpentrywsicnt = 0 - rtype, rdata, rmsgid = topology.master1.result2(msgid) - topology.master1.log.info("%d results" % len(rdata)) - - for dn, attrs in rdata: - if 'nscpentrywsi' in attrs: - nscpentrywsicnt += 1 - assert nscpentrywsicnt == 0 - topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) 
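The same search-and-count pattern repeats below for the bind_entry and anonymous binds on both masters. What each block computes boils down to the following sketch, assuming a python-ldap style connection; the helper name is hypothetical and not part of this patch:

import ldap

def count_nscpentrywsi(conn, suffix):
    # Hypothetical helper (illustration only): search the suffix requesting the
    # operational attribute 'nscpentrywsi' and count how many returned entries
    # actually expose it.
    msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
    rtype, rdata, rmsgid = conn.result2(msgid)
    return sum(1 for dn, attrs in rdata if 'nscpentrywsi' in attrs)

Per the test's intent, only the Directory Manager bind is expected to see a non-zero count; the bind_entry and anonymous binds that follow must return 0.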
- - # bind as bind_entry - topology.master2.log.info("##### Master2: Bind as %s #####" % BIND_DN) - topology.master2.simple_bind_s(BIND_DN, BIND_PW) - - topology.master2.log.info("Master2: Calling search_ext...") - msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) - nscpentrywsicnt = 0 - rtype, rdata, rmsgid = topology.master2.result2(msgid) - topology.master2.log.info("%d results" % len(rdata)) - - for dn, attrs in rdata: - if 'nscpentrywsi' in attrs: - nscpentrywsicnt += 1 - assert nscpentrywsicnt == 0 - topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) - - # bind as anonymous - topology.master1.log.info("##### Master1: Bind as anonymous #####") - topology.master1.simple_bind_s("", "") - - topology.master1.log.info("Master1: Calling search_ext...") - msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) - nscpentrywsicnt = 0 - rtype, rdata, rmsgid = topology.master1.result2(msgid) - topology.master1.log.info("%d results" % len(rdata)) - - for dn, attrs in rdata: - if 'nscpentrywsi' in attrs: - nscpentrywsicnt += 1 - assert nscpentrywsicnt == 0 - topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) - - # bind as bind_entry - topology.master2.log.info("##### Master2: Bind as anonymous #####") - topology.master2.simple_bind_s("", "") - - topology.master2.log.info("Master2: Calling search_ext...") - msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) - nscpentrywsicnt = 0 - rtype, rdata, rmsgid = topology.master2.result2(msgid) - topology.master2.log.info("%d results" % len(rdata)) - - for dn, attrs in rdata: - if 'nscpentrywsi' in attrs: - nscpentrywsicnt += 1 - assert nscpentrywsicnt == 0 - topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) - - topology.master1.log.info("##### ticket47869 was successfully verified. #####") - - -def test_ticket47869_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47869_init(topo) - - test_ticket47869_check(topo) - - test_ticket47869_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47871_test.py b/dirsrvtests/tickets/ticket47871_test.py deleted file mode 100644 index d6ea214..0000000 --- a/dirsrvtests/tickets/ticket47871_test.py +++ /dev/null @@ -1,226 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -ENTRY_DN = "cn=test_entry, %s" % SUFFIX - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 10 - -ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] - - -class TopologyMasterConsumer(object): - def __init__(self, master, consumer): - master.open() - self.master = master - - consumer.open() - self.consumer = consumer - - def __repr__(self): - return "Master[%s] -> Consumer[%s" % (self.master, self.consumer) - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER -> Consumer. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - master = DirSrv(verbose=False) - consumer = DirSrv(verbose=False) - - # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master.allocate(args_master) - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER_1 - args_instance[SER_PORT] = PORT_CONSUMER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 - args_consumer = args_instance.copy() - consumer.allocate(args_consumer) - - # Get the status of the instance and restart it if it exists - instance_master = master.exists() - instance_consumer = consumer.exists() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master.testReplication(DEFAULT_SUFFIX, consumer): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # clear the tmp directory - master.clearTmpDir(__file__) - - # - # Here we have two instances master and consumer - # with replication working. 
Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology - return TopologyMasterConsumer(master, consumer) - - -def test_ticket47871_init(topology): - """ - Initialize the test environment - """ - topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', "10s"), # 10 second triming - (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', "5s")] - topology.master.modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod) - #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF) - #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - topology.master.stop(timeout=10) - topology.master.start(timeout=10) - - topology.master.log.info("test_ticket47871_init topology %r" % (topology)) - # the test case will check if a warning message is logged in the - # error log of the supplier - topology.master.errorlog_file = open(topology.master.errlog, "r") - - -def test_ticket47871_1(topology): - ''' - ADD entries and check they are all in the retrocl - ''' - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - topology.master.log.info("test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) - - # Check the number of entries in the retro changelog - time.sleep(1) - ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") - assert len(ents) == MAX_OTHERS - topology.master.log.info("Added entries are") - for ent in ents: - topology.master.log.info("%s" % ent.dn) - - -def test_ticket47871_2(topology): - ''' - Wait until there is just a last entries - ''' - MAX_TRIES = 10 - TRY_NO = 1 - while TRY_NO <= MAX_TRIES: - time.sleep(6) # at least 1 trimming occurred - ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") - assert len(ents) <= MAX_OTHERS - topology.master.log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents))) - for ent in ents: - topology.master.log.info("%s" % ent.dn) - if len(ents) > 1: - TRY_NO += 1 - else: - break - assert TRY_NO <= MAX_TRIES - assert len(ents) <= 1 - - -def test_ticket47871_final(topology): - topology.master.delete() - topology.consumer.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47871_init(topo) - test_ticket47871_1(topo) - test_ticket47871_2(topo) - - test_ticket47871_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47900_test.py b/dirsrvtests/tickets/ticket47900_test.py deleted file mode 100644 index c01b733..0000000 --- a/dirsrvtests/tickets/ticket47900_test.py +++ /dev/null @@ -1,344 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -CONFIG_DN = 'cn=config' -ADMIN_NAME = 'passwd_admin' -ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) -ADMIN_PWD = 'adminPassword_1' -ENTRY_NAME = 'Joe Schmo' -ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) -INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47900(topology): - """ - Test that password administrators/root DN can - bypass password syntax/policy. - - We need to test how passwords are modified in - existing entries, and when adding new entries. - - Create the Password Admin entry, but do not set - it as an admin yet. Use the entry to verify invalid - passwords are caught. Then activate the password - admin and make sure it can bypass password policy. - """ - - # Prepare the Password Administator - entry = Entry(ADMIN_DN) - entry.setValues('objectclass', 'top', 'person') - entry.setValues('sn', ADMIN_NAME) - entry.setValues('cn', ADMIN_NAME) - entry.setValues('userpassword', ADMIN_PWD) - - topology.standalone.log.info("Creating Password Administator entry %s..." 
% ADMIN_DN) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - topology.standalone.log.error('Unexpected result ' + e.message['desc']) - assert False - topology.standalone.log.error("Failed to add Password Administator %s, error: %s " - % (ADMIN_DN, e.message['desc'])) - assert False - - topology.standalone.log.info("Configuring password policy...") - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local' , 'on'), - (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'), - (ldap.MOD_REPLACE, 'passwordMinCategories' , '1'), - (ldap.MOD_REPLACE, 'passwordMinTokenLength' , '1'), - (ldap.MOD_REPLACE, 'passwordExp' , 'on'), - (ldap.MOD_REPLACE, 'passwordMinDigits' , '1'), - (ldap.MOD_REPLACE, 'passwordMinSpecials' , '1')]) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed configure password policy: ' + e.message['desc']) - assert False - - # - # Add an aci to allow everyone all access (just makes things easier) - # - topology.standalone.log.info("Add aci to allow password admin to add/update entries...") - - ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX - ACI_TARGETATTR = "(targetattr = *)" - ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " - ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" - ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] - try: - topology.standalone.modify_s(SUFFIX, mod) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc']) - assert False - - # - # Bind as the Password Admin - # - topology.standalone.log.info("Bind as the Password Administator (before activating)...") - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) - assert False - - # - # Setup our test entry, and test password policy is working - # - entry = Entry(ENTRY_DN) - entry.setValues('objectclass', 'top', 'person') - entry.setValues('sn', ENTRY_NAME) - entry.setValues('cn', ENTRY_NAME) - - # - # Start by attempting to add an entry with an invalid password - # - topology.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...") - for passwd in INVALID_PWDS: - failed_as_expected = False - entry.setValues('userpassword', passwd) - topology.standalone.log.info("Create a regular user entry %s with password (%s)..." 
% (ENTRY_DN, passwd)) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - # We failed as expected - failed_as_expected = True - topology.standalone.log.info('Add failed as expected: password (%s) result (%s)' - % (passwd, e.message['desc'])) - - if not failed_as_expected: - topology.standalone.log.error("We were incorrectly able to add an entry " + - "with an invalid password (%s)" % (passwd)) - assert False - - # - # Now activate a password administator, bind as root dn to do the config - # update, then rebind as the password admin - # - topology.standalone.log.info("Activate the Password Administator...") - - # Bind as Root DN - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) - assert False - - # Update config - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc']) - assert False - - # Bind as Password Admin - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) - assert False - - # - # Start adding entries with invalid passwords, delete the entry after each pass. - # - for passwd in INVALID_PWDS: - entry.setValues('userpassword', passwd) - topology.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd)) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to add entry with password (%s) result (%s)' - % (passwd, e.message['desc'])) - assert False - - topology.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN) - - # Delete entry for the next pass - try: - topology.standalone.delete_s(ENTRY_DN) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to delete entry: %s' % (e.message['desc'])) - assert False - - # - # Add the entry for the next round of testing (modify password) - # - entry.setValues('userpassword', ADMIN_PWD) - try: - topology.standalone.add_s(entry) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to add entry with valid password (%s) result (%s)' - % (passwd, e.message['desc'])) - assert False - - # - # Deactivate the password admin and make sure invalid password updates fail - # - topology.standalone.log.info("Deactivate Password Administator and try invalid password updates...") - - # Bind as root DN - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) - assert False - - # Update config - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)]) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to remove password admin from config: ' + e.message['desc']) - assert False - - # Bind as Password Admin - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) - assert False - - # - # Make invalid password updates that should fail - # - for passwd in INVALID_PWDS: - failed_as_expected = False - entry.setValues('userpassword', passwd) - try: - topology.standalone.modify_s(ENTRY_DN, 
[(ldap.MOD_REPLACE, 'userpassword', passwd)]) - except ldap.LDAPError as e: - # We failed as expected - failed_as_expected = True - topology.standalone.log.info('Password update failed as expected: password (%s) result (%s)' - % (passwd, e.message['desc'])) - - if not failed_as_expected: - topology.standalone.log.error("We were incorrectly able to add an invalid password (%s)" - % (passwd)) - assert False - - # - # Now activate a password administator - # - topology.standalone.log.info("Activate Password Administator and try updates again...") - - # Bind as root DN - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) - assert False - - # Update config - try: - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)]) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc']) - assert False - - # Bind as Password Admin - try: - topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) - except ldap.LDAPError as e: - topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc']) - assert False - - # - # Make the same password updates, but this time they should succeed - # - for passwd in INVALID_PWDS: - entry.setValues('userpassword', passwd) - try: - topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) - except ldap.LDAPError as e: - topology.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)' - % (passwd, e.message['desc'])) - assert False - topology.standalone.log.info('Password update succeeded (%s)' % passwd) - - -def test_ticket47900_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47900(topo) - test_ticket47900_final(topo) - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47910_test.py b/dirsrvtests/tickets/ticket47910_test.py deleted file mode 100644 index afcfd88..0000000 --- a/dirsrvtests/tickets/ticket47910_test.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import re -import subprocess -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from datetime import datetime, timedelta - - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-@pytest.fixture(scope="module")
-def log_dir(topology):
-    '''
-    Do a search operation
-    and disable access log buffering
-    to generate the access log
-    '''
-
-    log.info("Disable access log buffering")
-    topology.standalone.setAccessLogBuffering(False)
-
-    log.info("Do an ldapsearch operation")
-    topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
-
-    log.info("Sleep for some time so that the access log file gets generated")
-    time.sleep(1)
-
-    return topology.standalone.accesslog
-
-
-def format_time(local_datetime):
-    formatted_time = (local_datetime.strftime("[%d/%b/%Y:%H:%M:%S]"))
-    return formatted_time
-
-
-def execute_logconv(start_time_stamp, end_time_stamp, access_log):
-    '''
-    This function takes a start time and an end time,
-    assigns these values to the -S and -E options of logconv.pl,
-    then executes logconv.pl and returns its exit code
-    '''
-
-    log.info("Executing logconv.pl with -S current time and -E end time")
-    cmd = ['logconv.pl', '-S', start_time_stamp, '-E', end_time_stamp, access_log]
-    log.info(" ".join(cmd))
-    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    stdout, stderr = proc.communicate()
-    log.info("standard output: " + stdout)
-    log.info("standard error: " + stderr)
-    return proc.returncode
-
-
-def test_ticket47910_logconv_start_end_positive(topology, log_dir):
-    '''
-    Execute logconv.pl with -S (start time) and -E (end time) set to arbitrary valid time stamps
-    This should execute successfully
-    '''
-    #
-    # Execute logconv.pl -S -E with random timestamp
-    #
-    log.info('Running test_ticket47910 - Execute logconv.pl -S -E with random values')
-
-    log.info("taking current time with offset of 2 mins and formatting it to feed -S")
-    start_time_stamp = (datetime.now() - timedelta(minutes=2))
-    formatted_start_time_stamp = format_time(start_time_stamp)
-
-    log.info("taking current time with offset of 2 mins and formatting it to feed -E")
-    end_time_stamp = (datetime.now() + timedelta(minutes=2))
-    formatted_end_time_stamp = format_time(end_time_stamp)
-
-    log.info("Executing logconv.pl with -S and -E")
-    result = execute_logconv(formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
-    assert result == 0
-
-
-def test_ticket47910_logconv_start_end_negative(topology, log_dir):
-    '''
-    Execute logconv.pl with -S (start time) and -E (end time)
-    This is a negative test case, where the end time is earlier than the
-    start time
-    This should give an error message
-    '''
-
-    #
-    # Execute logconv.pl -S and -E with random timestamp
-    #
-    log.info('Running test_ticket47910 - Execute logconv.pl -S -E with starttime>endtime')
-
-    log.info("taking current time with offset of 2 mins and formatting it to feed -S")
-    start_time_stamp = (datetime.now() + timedelta(minutes=2))
-    formatted_start_time_stamp = format_time(start_time_stamp)
-
-    log.info("taking current time with offset of 2 mins and formatting it to feed -E")
-
end_time_stamp = (datetime.now() - timedelta(minutes=2)) - formatted_end_time_stamp = format_time(end_time_stamp) - - log.info("Executing logconv.pl with -S and -E") - result = execute_logconv(formatted_start_time_stamp, formatted_end_time_stamp, log_dir) - assert result == 1 - - -def test_ticket47910_logconv_start_end_invalid(topology, log_dir): - ''' - Execute logconv.pl with -S and -E(endtime) with invalid time stamp - This is a negative test case, where it should give error message - ''' - # - # Execute logconv.pl -S and -E with invalid timestamp - # - log.info('Running test_ticket47910 - Execute logconv.pl -S -E with invalid timestamp') - log.info("Set start time and end time to invalid values") - start_time_stamp = "invalid" - end_time_stamp = "invalid" - - log.info("Executing logconv.pl with -S and -E") - result = execute_logconv(start_time_stamp, end_time_stamp, log_dir) - assert result == 1 - - -def test_ticket47910_logconv_noaccesslogs(topology, log_dir): - - ''' - Execute logconv.pl -S(starttime) without specify - access logs location - ''' - - # - # Execute logconv.pl -S with random timestamp and no access log location - # - log.info('Running test_ticket47910 - Execute logconv.pl without access logs') - - log.info("taking current time with offset of 2 mins and formatting it to feed -S") - time_stamp = (datetime.now() - timedelta(minutes=2)) - formatted_time_stamp = format_time(time_stamp) - log.info("Executing logconv.pl with -S current time") - cmd = ['logconv.pl', '-S', formatted_time_stamp] - log.info(" ".join(cmd)) - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = proc.communicate() - log.info("standard output" + stdout) - log.info("standard errors" + stderr) - - assert proc.returncode == 1 - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - pytest.main("-s ticket47910_test.py") diff --git a/dirsrvtests/tickets/ticket47920_test.py b/dirsrvtests/tickets/ticket47920_test.py deleted file mode 100644 index d4f6a53..0000000 --- a/dirsrvtests/tickets/ticket47920_test.py +++ /dev/null @@ -1,194 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from ldap.controls.readentry import PreReadControl,PostReadControl - - -SCOPE_IN_CN = 'in' -SCOPE_OUT_CN = 'out' -SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) -SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) - -PROVISIONING_CN = "provisioning" -PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) - -ACTIVE_CN = "accounts" -STAGE_CN = "staged users" -DELETE_CN = "deleted users" -ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) -STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) -DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) - -STAGE_USER_CN = "stage guy" -STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) - -ACTIVE_USER_CN = "active guy" -ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) - -OUT_USER_CN = "out guy" -OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) - -STAGE_GROUP_CN = "stage group" -STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) - -ACTIVE_GROUP_CN = "active group" -ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) - -OUT_GROUP_CN = "out group" -OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) - -INDIRECT_ACTIVE_GROUP_CN = "indirect active group" -INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) - -INITIAL_DESC = "inital description" -FINAL_DESC = "final description" - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("#######") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("#######") - topology.standalone.log.info("###############################################") - - -def _add_user(topology, type='active'): - if type == 'active': - topology.standalone.add_s(Entry((ACTIVE_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': ACTIVE_USER_CN, - 'cn': ACTIVE_USER_CN, - 'description': INITIAL_DESC}))) - elif type == 'stage': - topology.standalone.add_s(Entry((STAGE_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': STAGE_USER_CN, - 'cn': STAGE_USER_CN}))) - else: - topology.standalone.add_s(Entry((OUT_USER_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': OUT_USER_CN, - 'cn': OUT_USER_CN}))) - - -def test_ticket47920_init(topology): - topology.standalone.add_s(Entry((SCOPE_IN_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': SCOPE_IN_DN}))) - topology.standalone.add_s(Entry((ACTIVE_DN, { - 'objectclass': "top nscontainer".split(), - 'cn': ACTIVE_CN}))) - - # add users - _add_user(topology, 'active') - - -def test_ticket47920_mod_readentry_ctrl(topology): - _header(topology, 'MOD: with a readentry control') - - topology.standalone.log.info("Check the initial value of the entry") - ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) - assert ent.hasAttr('description') - assert ent.getValue('description') == INITIAL_DESC - - pr = PostReadControl(criticality=True, attrList=['cn', 'description']) - _, _, _, resp_ctrls = topology.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])], serverctrls=[pr]) - - assert resp_ctrls[0].dn == ACTIVE_USER_DN - assert 'description' in resp_ctrls[0].entry - assert 'cn' in resp_ctrls[0].entry - print(resp_ctrls[0].entry['description']) - - ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) - assert ent.hasAttr('description') - assert ent.getValue('description') == FINAL_DESC - - -def test_ticket47920_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47920_init(topo) - test_ticket47920_mod_readentry_ctrl(topo) - test_ticket47920_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47921_test.py b/dirsrvtests/tickets/ticket47921_test.py deleted file mode 100644 index 4f3d54e..0000000 --- a/dirsrvtests/tickets/ticket47921_test.py +++ /dev/null @@ -1,163 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47921(topology): - ''' - Test that indirect cos reflects the current value of the indirect entry - ''' - - INDIRECT_COS_DN = 'cn=cos definition,' + DEFAULT_SUFFIX - MANAGER_DN = 'uid=my manager,ou=people,' + DEFAULT_SUFFIX - USER_DN = 'uid=user,ou=people,' + DEFAULT_SUFFIX - - # Add COS definition - try: - topology.standalone.add_s(Entry((INDIRECT_COS_DN, - {'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(), - 'cosIndirectSpecifier': 'manager', - 'cosAttribute': 'roomnumber' - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add cos defintion, error: ' + e.message['desc']) - assert False - - # Add manager entry - try: - topology.standalone.add_s(Entry((MANAGER_DN, - {'objectclass': 'top extensibleObject'.split(), - 'uid': 'my manager', - 'roomnumber': '1' - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add manager entry, error: ' + e.message['desc']) - assert False - - # Add user entry - try: - topology.standalone.add_s(Entry((USER_DN, - {'objectclass': 'top person organizationalPerson inetorgperson'.split(), - 'sn': 'last', - 'cn': 'full', - 'givenname': 'mark', - 'uid': 'user', - 'manager': MANAGER_DN - }))) - except ldap.LDAPError as e: - log.fatal('Failed to add manager entry, error: ' + e.message['desc']) - assert False - - # Test COS is working - try: - entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - "uid=user", - ['roomnumber']) - if entry: - if entry[0].getValue('roomnumber') != '1': - log.fatal('COS is not working.') - assert False - else: - log.fatal('Failed to find user entry') - assert False - except ldap.LDAPError as e: - log.error('Failed to search for user entry: ' + e.message['desc']) - assert False - - # Modify manager entry - try: - topology.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', '2')]) - except ldap.LDAPError as e: - log.error('Failed to modify manager entry: ' + e.message['desc']) - assert False - - # Confirm COS is returning the new value - try: - entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, - "uid=user", - ['roomnumber']) - if entry: - if entry[0].getValue('roomnumber') != '2': - log.fatal('COS is not 
working after manager update.') - assert False - else: - log.fatal('Failed to find user entry') - assert False - except ldap.LDAPError as e: - log.error('Failed to search for user entry: ' + e.message['desc']) - assert False - - log.info('Test complete') - - -def test_ticket47921_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47921(topo) - test_ticket47921_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47927_test.py b/dirsrvtests/tickets/ticket47927_test.py deleted file mode 100644 index 78e0b29..0000000 --- a/dirsrvtests/tickets/ticket47927_test.py +++ /dev/null @@ -1,313 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -EXCLUDED_CONTAINER_CN = "excluded_container" -EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX) - -EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container" -EXCLUDED_BIS_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_BIS_CONTAINER_CN, SUFFIX) - -ENFORCED_CONTAINER_CN = "enforced_container" -ENFORCED_CONTAINER_DN = "cn=%s,%s" % (ENFORCED_CONTAINER_CN, SUFFIX) - -USER_1_CN = "test_1" -USER_1_DN = "cn=%s,%s" % (USER_1_CN, ENFORCED_CONTAINER_DN) -USER_2_CN = "test_2" -USER_2_DN = "cn=%s,%s" % (USER_2_CN, ENFORCED_CONTAINER_DN) -USER_3_CN = "test_3" -USER_3_DN = "cn=%s,%s" % (USER_3_CN, EXCLUDED_CONTAINER_DN) -USER_4_CN = "test_4" -USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN) - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47927_init(topology): - topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) - try: - topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'), - (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX), - ]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.message['desc']) - assert False - topology.standalone.restart(timeout=120) - - topology.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(), - 'cn': EXCLUDED_CONTAINER_CN}))) - topology.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(), - 'cn': EXCLUDED_BIS_CONTAINER_CN}))) - topology.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(), - 'cn': ENFORCED_CONTAINER_CN}))) - - # adding an entry on a stage with a different 'cn' - topology.standalone.add_s(Entry((USER_1_DN, { - 'objectclass': "top person".split(), - 'sn': USER_1_CN, - 'cn': USER_1_CN}))) - # adding an entry on a stage with a different 'cn' - topology.standalone.add_s(Entry((USER_2_DN, { - 'objectclass': "top person".split(), - 'sn': USER_2_CN, - 'cn': USER_2_CN}))) - topology.standalone.add_s(Entry((USER_3_DN, { - 'objectclass': "top person".split(), - 'sn': USER_3_CN, - 'cn': USER_3_CN}))) - topology.standalone.add_s(Entry((USER_4_DN, { - 'objectclass': "top person".split(), - 'sn': USER_4_CN, - 'cn': USER_4_CN}))) - - -def test_ticket47927_one(topology): - ''' - Check that uniqueness is enforce on all SUFFIX - ''' - UNIQUE_VALUE='1234' - try: - topology.standalone.modify_s(USER_1_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc'])) - assert False - - # we expect to fail because user1 is in the scope of the plugin - try: - topology.standalone.modify_s(USER_2_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) - assert False - except ldap.LDAPError as e: - log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc'])) - pass - - - # we expect to fail because user1 is in the scope of the plugin - try: - topology.standalone.modify_s(USER_3_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN)) - assert False - except ldap.LDAPError as e: - log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc'])) - pass - - -def 
test_ticket47927_two(topology): - ''' - Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin - ''' - try: - topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_CONTAINER_DN, e.message['desc'])) - assert False - topology.standalone.restart(timeout=120) - - -def test_ticket47927_three(topology): - ''' - Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN - First case: it exists an entry (with the same attribute value) in the scope - of the plugin and we set the value in an entry that is in an excluded scope - ''' - UNIQUE_VALUE='9876' - try: - topology.standalone.modify_s(USER_1_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.message['desc']) - assert False - - # we should not be allowed to set this value (because user1 is in the scope) - try: - topology.standalone.modify_s(USER_2_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) - assert False - except ldap.LDAPError as e: - log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc'])) - - - # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful - try: - topology.standalone.modify_s(USER_3_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN)) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc'])) - assert False - - -def test_ticket47927_four(topology): - ''' - Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN - Second case: it exists an entry (with the same attribute value) in an excluded scope - of the plugin and we set the value in an entry is in the scope - ''' - UNIQUE_VALUE='1111' - # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful - try: - topology.standalone.modify_s(USER_3_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc'])) - assert False - - - # we should be allowed to set this value (because user3 is excluded from scope) - try: - topology.standalone.modify_s(USER_1_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc'])) - assert False - - # we should not be allowed to set this value (because user1 is in the scope) - try: - topology.standalone.modify_s(USER_2_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN) - assert False - except ldap.LDAPError as e: - log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc'])) - pass 
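
The checks above repeat a single pattern: a MOD_REPLACE of telephonenumber must be rejected while the target entry sits under a subtree covered by the Attribute Uniqueness plugin, and must be accepted once the entry's subtree is listed in uniqueness-exclude-subtrees. A minimal sketch of that pattern, assuming python-ldap and an already-bound connection; the helper name and the sample values are illustrative, and mapping the rejection to CONSTRAINT_VIOLATION is an assumption on my part (the test functions themselves only catch the generic LDAPError):

    import ldap

    def expect_uniqueness_conflict(conn, dn, value, attr='telephonenumber'):
        """Return True if the server rejects the update as a uniqueness conflict.

        Assumes the Attribute Uniqueness plugin reports duplicates as
        CONSTRAINT_VIOLATION; any other LDAPError is left to propagate so an
        unrelated failure is not mistaken for an expected conflict.
        """
        try:
            conn.modify_s(dn, [(ldap.MOD_REPLACE, attr, value)])
            return False   # update accepted: dn is outside the enforced scope
        except ldap.CONSTRAINT_VIOLATION:
            return True    # duplicate rejected: dn is inside the enforced scope

    # Illustrative expectations once an exclude-subtree is configured:
    #   expect_uniqueness_conflict(topology.standalone, USER_2_DN, '1111')  -> True
    #   expect_uniqueness_conflict(topology.standalone, USER_3_DN, '1111')  -> False (excluded subtree)
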
- - -def test_ticket47927_five(topology): - ''' - Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin - ''' - try: - topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_BIS_CONTAINER_DN, e.message['desc'])) - assert False - topology.standalone.restart(timeout=120) - topology.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE) - - -def test_ticket47927_six(topology): - ''' - Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN - and EXCLUDED_BIS_CONTAINER_DN - First case: it exists an entry (with the same attribute value) in the scope - of the plugin and we set the value in an entry that is in an excluded scope - ''' - UNIQUE_VALUE = '222' - try: - topology.standalone.modify_s(USER_1_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.message['desc']) - assert False - - # we should not be allowed to set this value (because user1 is in the scope) - try: - topology.standalone.modify_s(USER_2_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) - assert False - except ldap.LDAPError as e: - log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc'])) - - - # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful - try: - topology.standalone.modify_s(USER_3_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN)) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc'])) - assert False - # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful - try: - topology.standalone.modify_s(USER_4_DN, - [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) - log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN)) - except ldap.LDAPError as e: - log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_4_DN, e.message['desc'])) - assert False - - -def test_ticket47927_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47927_init(topo) - test_ticket47927_one(topo) - test_ticket47927_two(topo) - test_ticket47927_three(topo) - test_ticket47927_four(topo) - test_ticket47927_five(topo) - test_ticket47927_six(topo) - test_ticket47927_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47931_test.py b/dirsrvtests/tickets/ticket47931_test.py deleted file mode 100644 index 9aa54fc..0000000 --- a/dirsrvtests/tickets/ticket47931_test.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -import threading -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * 
-from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None -SECOND_SUFFIX = "dc=deadlock" -SECOND_BACKEND = "deadlock" -RETROCL_PLUGIN_DN = ('cn=' + PLUGIN_RETRO_CHANGELOG + ',cn=plugins,cn=config') -MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') -GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) -MEMBER_DN_COMP = "uid=member" -TIME_OUT = 5 - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -class modifySecondBackendThread(threading.Thread): - def __init__(self, inst, timeout): - threading.Thread.__init__(self) - self.daemon = True - self.inst = inst - self.timeout = timeout - - def run(self): - conn = self.inst.openConnection() - conn.set_option(ldap.OPT_TIMEOUT, self.timeout) - log.info('Modify second suffix...') - for x in range(0, 5000): - try: - conn.modify_s(SECOND_SUFFIX, - [(ldap.MOD_REPLACE, - 'description', - 'new description')]) - except ldap.LDAPError as e: - log.fatal('Failed to modify second suffix - error: %s' % - (e.message['desc'])) - assert False - - conn.close() - log.info('Finished modifying second suffix') - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Delete each instance in the end - def fin(): - standalone.delete() - request.addfinalizer(fin) - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47931(topology): - """Test Retro Changelog and MemberOf deadlock fix. - Verification steps: - - Enable retro cl and memberOf. - - Create two backends: A & B. - - Configure retrocl scoping for backend A. - - Configure memberOf plugin for uniquemember - - Create group in backend A. - - In parallel, add members to the group on A, and make modifications - to entries in backend B. - - Make sure the server does not hang during the updates to both - backends. - - """ - - # Enable dynamic plugins to make plugin configuration easier - try: - topology.standalone.modify_s(DN_CONFIG, - [(ldap.MOD_REPLACE, - 'nsslapd-dynamic-plugins', - 'on')]) - except ldap.LDAPError as e: - ldap.error('Failed to enable dynamic plugins! 
' + e.message['desc']) - assert False - - # Enable the plugins - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - - # Create second backend - topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND}) - topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND) - - # Create the root node of the second backend - try: - topology.standalone.add_s(Entry((SECOND_SUFFIX, - {'objectclass': 'top domain'.split(), - 'dc': 'deadlock'}))) - except ldap.LDAPError as e: - log.fatal('Failed to create suffix entry: error ' + e.message['desc']) - assert False - - # Configure retrocl scope - try: - topology.standalone.modify_s(RETROCL_PLUGIN_DN, - [(ldap.MOD_REPLACE, - 'nsslapd-include-suffix', - DEFAULT_SUFFIX)]) - except ldap.LDAPError as e: - ldap.error('Failed to configure retrocl plugin: ' + e.message['desc']) - assert False - - # Configure memberOf group attribute - try: - topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, - [(ldap.MOD_REPLACE, - 'memberofgroupattr', - 'uniquemember')]) - except ldap.LDAPError as e: - log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) - assert False - - # Create group - try: - topology.standalone.add_s(Entry((GROUP_DN, - {'objectclass': 'top extensibleObject'.split(), - 'cn': 'group'}))) - except ldap.LDAPError as e: - log.fatal('Failed to add grouo: error ' + e.message['desc']) - assert False - - # Create 1500 entries (future members of the group) - for idx in range(1, 1500): - try: - USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) - topology.standalone.add_s(Entry((USER_DN, - {'objectclass': 'top extensibleObject'.split(), - 'uid': 'member%d' % (x)}))) - except ldap.LDAPError as e: - log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) - assert False - - # Modify second backend (separate thread) - mod_backend_thrd = modifySecondBackendThread(topology.standalone, TIME_OUT) - mod_backend_thrd.start() - - # Add members to the group - set timeout - log.info('Adding members to the group...') - topology.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT) - for idx in range(1, 1500): - try: - MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) - topology.standalone.modify_s(GROUP_DN, - [(ldap.MOD_ADD, - 'uniquemember', - MEMBER_VAL)]) - except ldap.TIMEOUT: - log.fatal('Deadlock! Bug verification failed.') - assert False - except ldap.LDAPError as e: - log.fatal('Failed to update group(not a deadlock) member (%s) - error: %s' % - (MEMBER_VAL, e.message['desc'])) - assert False - log.info('Finished adding members to the group.') - - # Wait for the thread to finish - mod_backend_thrd.join() - - # No timeout, test passed! - log.info('Test complete\n') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tickets/ticket47937_test.py b/dirsrvtests/tickets/ticket47937_test.py deleted file mode 100644 index 6c09cf8..0000000 --- a/dirsrvtests/tickets/ticket47937_test.py +++ /dev/null @@ -1,188 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47937(topology): - """ - Test that DNA plugin only accepts valid attributes for "dnaType" - """ - - log.info("Creating \"ou=people\"...") - try: - topology.standalone.add_s(Entry(('ou=people,' + SUFFIX, { - 'objectclass': 'top organizationalunit'.split(), - 'ou': 'people' - }))) - - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.error('Failed to add ou=people org unit: error ' + e.message['desc']) - assert False - - log.info("Creating \"ou=ranges\"...") - try: - topology.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, { - 'objectclass': 'top organizationalunit'.split(), - 'ou': 'ranges' - }))) - - except ldap.LDAPError as e: - log.error('Failed to add ou=ranges org unit: error ' + e.message['desc']) - assert False - - log.info("Creating \"cn=entry\"...") - try: - topology.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, { - 'objectclass': 'top groupofuniquenames'.split(), - 'cn': 'entry' - }))) - - except ldap.LDAPError as e: - log.error('Failed to add test entry: error ' + e.message['desc']) - assert False - - log.info("Creating DNA shared config entry...") - try: - topology.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, { - 'objectclass': 'top dnaSharedConfig'.split(), - 'dnaHostname': 'localhost.localdomain', - 'dnaPortNum': '389', - 'dnaSecurePortNum': '636', - 'dnaRemainingValues': '9501' - }))) - - except ldap.LDAPError as e: - log.error('Failed to add shared config entry: error ' + e.message['desc']) - assert False - - log.info("Add dna plugin config entry...") - try: - topology.standalone.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { - 'objectclass': 'top dnaPluginConfig'.split(), - 'dnaType': 'description', - 'dnaMaxValue': '10000', - 'dnaMagicRegen': '0', - 'dnaFilter': '(objectclass=top)', - 'dnaScope': 'ou=people,%s' % SUFFIX, - 'dnaNextValue': '500', - 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX - }))) - - except ldap.LDAPError as e: - log.error('Failed to add DNA config entry: 
error ' + e.message['desc']) - assert False - - log.info("Enable the DNA plugin...") - try: - topology.standalone.plugins.enable(name=PLUGIN_DNA) - except e: - log.error("Failed to enable DNA Plugin: error " + e.message['desc']) - assert False - - log.info("Restarting the server...") - topology.standalone.stop(timeout=120) - time.sleep(1) - topology.standalone.start(timeout=120) - time.sleep(3) - - log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...") - - try: - topology.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'dnaType', 'foo')]) - except ldap.LDAPError as e: - log.info('Operation failed as expected (error: %s)' % e.message['desc']) - else: - log.error('Operation incorectly succeeded! Test Failed!') - assert False - - -def test_ticket47937_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47937(topo) - test_ticket47937_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47950_test.py b/dirsrvtests/tickets/ticket47950_test.py deleted file mode 100644 index 7226637..0000000 --- a/dirsrvtests/tickets/ticket47950_test.py +++ /dev/null @@ -1,223 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX -USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47950(topology): - """ - Testing nsslapd-plugin-binddn-tracking does not cause issues around - access control and reconfiguring replication/repl agmt. - """ - - log.info('Testing Ticket 47950 - Testing nsslapd-plugin-binddn-tracking') - - # - # Turn on bind dn tracking - # - try: - topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')]) - log.info('nsslapd-plugin-binddn-tracking enabled.') - except ldap.LDAPError as e: - log.error('Failed to enable bind dn tracking: ' + e.message['desc']) - assert False - - # - # Add two users - # - try: - topology.standalone.add_s(Entry((USER1_DN, { - 'objectclass': "top person inetuser".split(), - 'userpassword': "password", - 'sn': "1", - 'cn': "user 1"}))) - log.info('Added test user %s' % USER1_DN) - except ldap.LDAPError as e: - log.error('Failed to add %s: %s' % (USER1_DN, e.message['desc'])) - assert False - - try: - topology.standalone.add_s(Entry((USER2_DN, { - 'objectclass': "top person inetuser".split(), - 'sn': "2", - 'cn': "user 2"}))) - log.info('Added test user %s' % USER2_DN) - except ldap.LDAPError as e: - log.error('Failed to add user1: ' + e.message['desc']) - assert False - - # - # Add an aci - # - try: - acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \ - ';allow (all) (userdn = "ldap:///%s");)' % USER1_DN - - topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)]) - log.info('Added aci') - except ldap.LDAPError as e: - log.error('Failed to add aci: ' + e.message['desc']) - assert False - - # - # Make modification as user - # - try: - topology.standalone.simple_bind_s(USER1_DN, "password") - log.info('Bind as user %s successful' % USER1_DN) - except ldap.LDAPError as e: - log.error('Failed to bind as user1: ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s(USER2_DN, [(ldap.MOD_REPLACE, 'cn', 'new value')]) - log.info('%s successfully modified user %s' % (USER1_DN, USER2_DN)) - except ldap.LDAPError as e: - log.error('Failed to update user2: ' + e.message['desc']) - assert False - - # - # Setup replica and create a repl agmt - # - try: - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - log.info('Bind as %s successful' % DN_DM) - except ldap.LDAPError as e: - log.error('Failed to bind as rootDN: ' + e.message['desc']) - assert False - - try: - topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER_1) - log.info('Successfully enabled replication.') - except ValueError: - log.error('Failed to enable replication') - assert False - - properties = {RA_NAME: r'test plugin internal bind dn', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - - try: - repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, host="127.0.0.1", - port="7777", properties=properties) - log.info('Successfully created replication agreement') - except InvalidArgumentError as e: - log.error('Failed to create replication agreement: ' + e.message['desc']) - assert False - - # - # modify replica - # - try: - properties = {REPLICA_ID: "7"} - 
topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, properties) - log.info('Successfully modified replica') - except ldap.LDAPError as e: - log.error('Failed to update replica config: ' + e.message['desc']) - assert False - - # - # modify repl agmt - # - try: - properties = {RA_CONSUMER_PORT: "8888"} - topology.standalone.agreement.setProperties(None, repl_agreement, None, properties) - log.info('Successfully modified replication agreement') - except ValueError: - log.error('Failed to update replica agreement: ' + repl_agreement) - assert False - - -def test_ticket47953_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47950(topo) - test_ticket47953_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47953_test.py b/dirsrvtests/tickets/ticket47953_test.py deleted file mode 100644 index f64d899..0000000 --- a/dirsrvtests/tickets/ticket47953_test.py +++ /dev/null @@ -1,128 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47953(topology): - """ - Test that we can delete an aci that has an invalid syntax. - Sart by importing an ldif with a "bad" aci, then simply try - to remove that value without error. 
- """ - - log.info('Testing Ticket 47953 - Test we can delete aci that has invalid syntax') - - # - # Import an invalid ldif - # - ldif_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket47953/ticket47953.ldif" - importTask = Tasks(topology.standalone) - args = {TASK_WAIT: True} - try: - importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) - except ValueError: - assert False - - # - # Delete the invalid aci - # - acival = '(targetattr ="fffff")(version 3.0;acl "Directory Administrators Group"' + \ - ';allow (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com");)' - - log.info('Attempting to remove invalid aci...') - try: - topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)]) - log.info('Removed invalid aci.') - except ldap.LDAPError as e: - log.error('Failed to remove invalid aci: ' + e.message['desc']) - assert False - - -def test_ticket47953_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47953(topo) - test_ticket47953_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47963_test.py b/dirsrvtests/tickets/ticket47963_test.py deleted file mode 100644 index deed905..0000000 --- a/dirsrvtests/tickets/ticket47963_test.py +++ /dev/null @@ -1,199 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket47963(topology): - ''' - Test that the memberOf plugin works correctly after setting: - - memberofskipnested: on - - ''' - PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' - USER_DN = 'uid=test_user,' + DEFAULT_SUFFIX - GROUP_DN1 = 'cn=group1,' + DEFAULT_SUFFIX - GROUP_DN2 = 'cn=group2,' + DEFAULT_SUFFIX - GROUP_DN3 = 'cn=group3,' + DEFAULT_SUFFIX - - # - # Enable the plugin and configure the skiop nest attribute, then restart the server - # - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - try: - topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')]) - except ldap.LDAPError as e: - log.error('test_automember: Failed to modify config entry: error ' + e.message['desc']) - assert False - - topology.standalone.restart(timeout=10) - - # - # Add our groups, users, memberships, etc - # - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'test_user' - }))) - except ldap.LDAPError as e: - log.error('Failed to add teset user: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((GROUP_DN1, { - 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), - 'cn': 'group1', - 'member': USER_DN - }))) - except ldap.LDAPError as e: - log.error('Failed to add group1: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((GROUP_DN2, { - 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), - 'cn': 'group2', - 'member': USER_DN - }))) - except ldap.LDAPError as e: - log.error('Failed to add group2: error ' + e.message['desc']) - assert False - - # Add group with no member(yet) - try: - topology.standalone.add_s(Entry((GROUP_DN3, { - 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), - 'cn': 'group' - }))) - except ldap.LDAPError as e: - log.error('Failed to add group3: error ' + e.message['desc']) - assert False - time.sleep(1) - - # - # Test we have the correct memberOf values in the user entry - # - try: - member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))') - entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) - if not entries: - log.fatal('User is missing expected memberOf attrs') - assert False - except ldap.LDAPError as e: - log.fatal('Search for user1 failed: ' + e.message['desc']) - assert False - - # Add the user to the group - try: - topology.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)]) - except ldap.LDAPError as e: - log.error('Failed to member to group: error ' + e.message['desc']) - assert False - time.sleep(1) - - # Check that the test user is a "memberOf" all three groups - try: - member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + - ')(memberOf=' + GROUP_DN3 + '))') - entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) - if not entries: - 
log.fatal('User is missing expected memberOf attrs') - assert False - except ldap.LDAPError as e: - log.fatal('Search for user1 failed: ' + e.message['desc']) - assert False - - # - # Delete group2, and check memberOf values in the user entry - # - try: - topology.standalone.delete_s(GROUP_DN2) - except ldap.LDAPError as e: - log.error('Failed to delete test group2: ' + e.message['desc']) - assert False - time.sleep(1) - - try: - member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))') - entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) - if not entries: - log.fatal('User incorrect memberOf attrs') - assert False - except ldap.LDAPError as e: - log.fatal('Search for user1 failed: ' + e.message['desc']) - assert False - - log.info('Test complete') - - -def test_ticket47963_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47963(topo) - test_ticket47963_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47966_test.py b/dirsrvtests/tickets/ticket47966_test.py deleted file mode 100644 index b311f47..0000000 --- a/dirsrvtests/tickets/ticket47966_test.py +++ /dev/null @@ -1,227 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -m1_m2_agmt = "" - -class TopologyReplication(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating master 1... - master1 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master1.allocate(args_master) - instance_master1 = master1.exists() - if instance_master1: - master1.delete() - master1.create() - master1.open() - master1.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - - # Creating master 2... 
- master2 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master2.allocate(args_master) - instance_master2 = master2.exists() - if instance_master2: - master2.delete() - master2.create() - master2.open() - master2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # - # Create all the agreements - # - # Creating agreement from master 1 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - global m1_m2_agmt - m1_m2_agmt = master1.agreement.create(suffix=DEFAULT_SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m1_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m2_agmt) - - # Creating agreement from master 2 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m1_agmt = master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m2_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m1_agmt) - - # Allow the replicas to get situated with the new agreements... - time.sleep(5) - - # - # Initialize all the agreements - # - master1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(m1_m2_agmt) - - # Check replication is working... - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Clear out the tmp dir - master1.clearTmpDir(__file__) - - return TopologyReplication(master1, master2) - - -def test_ticket47966(topology): - ''' - Testing bulk import when the backend with VLV was recreated. - If the test passes without the server crash, 47966 is verified. - ''' - log.info('Testing Ticket 47966 - [VLV] slapd crashes during Dogtag clone reinstallation') - M1 = topology.master1 - M2 = topology.master2 - - log.info('0. Create a VLV index on Master 2.') - # get the backend entry - be = M2.replica.conn.backend.list(suffix=DEFAULT_SUFFIX) - if not be: - log.fatal("ticket47966: enable to retrieve the backend for %s" % DEFAULT_SUFFIX) - raise ValueError("no backend for suffix %s" % DEFAULT_SUFFIX) - bent = be[0] - beName = bent.getValue('cn') - beDn = "cn=%s,cn=ldbm database,cn=plugins,cn=config" % beName - - # generate vlvSearch entry - vlvSrchDn = "cn=vlvSrch,%s" % beDn - log.info('0-1. 
vlvSearch dn: %s' % vlvSrchDn) - vlvSrchEntry = Entry(vlvSrchDn) - vlvSrchEntry.setValues('objectclass', 'top', 'vlvSearch') - vlvSrchEntry.setValues('cn', 'vlvSrch') - vlvSrchEntry.setValues('vlvBase', DEFAULT_SUFFIX) - vlvSrchEntry.setValues('vlvFilter', '(|(objectclass=*)(objectclass=ldapsubentry))') - vlvSrchEntry.setValues('vlvScope', '2') - M2.add_s(vlvSrchEntry) - - # generate vlvIndex entry - vlvIndexDn = "cn=vlvIdx,%s" % vlvSrchDn - log.info('0-2. vlvIndex dn: %s' % vlvIndexDn) - vlvIndexEntry = Entry(vlvIndexDn) - vlvIndexEntry.setValues('objectclass', 'top', 'vlvIndex') - vlvIndexEntry.setValues('cn', 'vlvIdx') - vlvIndexEntry.setValues('vlvSort', 'cn ou sn') - M2.add_s(vlvIndexEntry) - - log.info('1. Initialize Master 2 from Master 1.') - M1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - M1.waitForReplInit(m1_m2_agmt) - - # Check replication is working... - if M1.testReplication(DEFAULT_SUFFIX, M2): - log.info('1-1. Replication is working.') - else: - log.fatal('1-1. Replication is not working.') - assert False - - log.info('2. Delete the backend instance on Master 2.') - M2.delete_s(vlvIndexDn) - M2.delete_s(vlvSrchDn) - # delete the agreement, replica, and mapping tree, too. - M2.replica.disableReplication(DEFAULT_SUFFIX) - mappingTree = 'cn="%s",cn=mapping tree,cn=config' % DEFAULT_SUFFIX - M2.mappingtree.delete(DEFAULT_SUFFIX, beName, mappingTree) - M2.backend.delete(DEFAULT_SUFFIX, beDn, beName) - - log.info('3. Recreate the backend and the VLV index on Master 2.') - M2.mappingtree.create(DEFAULT_SUFFIX, beName) - M2.backend.create(DEFAULT_SUFFIX, {BACKEND_NAME: beName}) - log.info('3-1. Recreating %s and %s on Master 2.' % (vlvSrchDn, vlvIndexDn)) - M2.add_s(vlvSrchEntry) - M2.add_s(vlvIndexEntry) - M2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - # agreement m2_m1_agmt is not needed... :p - - log.info('4. Initialize Master 2 from Master 1 again.') - M1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - M1.waitForReplInit(m1_m2_agmt) - - # Check replication is working... - if M1.testReplication(DEFAULT_SUFFIX, M2): - log.info('4-1. Replication is working.') - else: - log.fatal('4-1. Replication is not working.') - assert False - - log.info('5. Check Master 2 is up.') - entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') - assert len(entries) > 0 - log.info('5-1. %s entries are returned from M2.' % len(entries)) - - log.info('Test complete') - - -def test_ticket47966_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket47966(topo) - test_ticket47966_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py deleted file mode 100644 index a748939..0000000 --- a/dirsrvtests/tickets/ticket47970_test.py +++ /dev/null @@ -1,158 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import ldap.sasl -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX -USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47970(topology): - """ - Testing that a failed SASL bind does not trigger account lockout - - which would attempt to update the passwordRetryCount on the root dse entry - """ - - log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout') - - # - # Enable account lockout - # - try: - topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')]) - log.info('account lockout enabled.') - except ldap.LDAPError as e: - log.error('Failed to enable account lockout: ' + e.message['desc']) - assert False - - try: - topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')]) - log.info('passwordMaxFailure set.') - except ldap.LDAPError as e: - log.error('Failed to to set passwordMaxFailure: ' + e.message['desc']) - assert False - - # - # Perform SASL bind that should fail - # - failed_as_expected = False - try: - user_name = "mark" - pw = "secret" - auth_tokens = ldap.sasl.digest_md5(user_name, pw) - topology.standalone.sasl_interactive_bind_s("", auth_tokens) - except ldap.INVALID_CREDENTIALS as e: - log.info("SASL Bind failed as expected") - failed_as_expected = True - - if not failed_as_expected: - log.error("SASL bind unexpectedly succeeded!") - assert False - - # - # Check that passwordRetryCount was not set on the root dse entry - # - try: - entry = topology.standalone.search_s("", ldap.SCOPE_BASE, - "passwordRetryCount=*", - ['passwordRetryCount']) - except ldap.LDAPError as e: - log.error('Failed to search Root DSE entry: ' + e.message['desc']) - assert False - - if entry: - log.error('Root DSE was incorrectly updated') - assert False - - # We passed - log.info('Root DSE was correctly not updated') - - -def test_ticket47970_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases 
independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47970(topo) - test_ticket47970_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47973_test.py b/dirsrvtests/tickets/ticket47973_test.py deleted file mode 100644 index 12bb789..0000000 --- a/dirsrvtests/tickets/ticket47973_test.py +++ /dev/null @@ -1,185 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import ldap.sasl -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -USER_DN = 'uid=user1,%s' % (DEFAULT_SUFFIX) -SCHEMA_RELOAD_COUNT = 10 - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def task_complete(conn, task_dn): - finished = False - - try: - task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*') - if not task_entry: - log.fatal('wait_for_task: Search failed to find task: ' + task_dn) - assert False - if task_entry[0].hasAttr('nstaskexitcode'): - # task is done - finished = True - except ldap.LDAPError as e: - log.fatal('wait_for_task: Search failed: ' + e.message['desc']) - assert False - - return finished - - -def test_ticket47973(topology): - """ - During the schema reload task there is a small window where the new schema is not loaded - into the asi hashtables - this results in searches not returning entries. - """ - - log.info('Testing Ticket 47973 - Test the searches still work as expected during schema reload tasks') - - # - # Add a user - # - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user1: error ' + e.message['desc']) - assert False - - # - # Run a series of schema_reload tasks while searching for our user. 
Since - # this is a race condition, run it several times. - # - task_count = 0 - while task_count < SCHEMA_RELOAD_COUNT: - # - # Add a schema reload task - # - - TASK_DN = 'cn=task-' + str(task_count) + ',cn=schema reload task, cn=tasks, cn=config' - try: - topology.standalone.add_s(Entry((TASK_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'task-' + str(task_count) - }))) - except ldap.LDAPError as e: - log.error('Failed to add task entry: error ' + e.message['desc']) - assert False - - # - # While we wait for the task to complete keep searching for our user - # - search_count = 0 - while search_count < 100: - # - # Now check the user is still being returned - # - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - '(uid=user1)') - if not entries or not entries[0]: - log.fatal('User was not returned from search!') - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) - assert False - - # - # Check if task is complete - # - if task_complete(topology.standalone, TASK_DN): - break - - search_count += 1 - - task_count += 1 - - -def test_ticket47973_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47973(topo) - test_ticket47973_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47980_test.py b/dirsrvtests/tickets/ticket47980_test.py deleted file mode 100644 index 34f0d3f..0000000 --- a/dirsrvtests/tickets/ticket47980_test.py +++ /dev/null @@ -1,662 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import ldap.sasl -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -BRANCH1 = 'ou=level1,' + DEFAULT_SUFFIX -BRANCH2 = 'ou=level2,ou=level1,' + DEFAULT_SUFFIX -BRANCH3 = 'ou=level3,ou=level2,ou=level1,' + DEFAULT_SUFFIX -BRANCH4 = 'ou=people,' + DEFAULT_SUFFIX -BRANCH5 = 'ou=lower,ou=people,' + DEFAULT_SUFFIX -BRANCH6 = 'ou=lower,ou=lower,ou=people,' + DEFAULT_SUFFIX -USER1_DN = 'uid=user1,%s' % (BRANCH1) -USER2_DN = 'uid=user2,%s' % (BRANCH2) -USER3_DN = 'uid=user3,%s' % (BRANCH3) -USER4_DN = 'uid=user4,%s' % (BRANCH4) -USER5_DN = 'uid=user5,%s' % (BRANCH5) -USER6_DN = 'uid=user6,%s' % (BRANCH6) - -BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' -BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' -BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' -BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com' - -BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' -BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' -BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' -BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com' - -BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' -BRANCH3_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' -BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' -BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com' - -BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' -BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' -BRANCH4_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' -BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' - -BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com' -BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' -BRANCH5_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' -BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com' - -BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' -BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 
'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' -BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' -BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket47980(topology): - """ - Multiple COS pointer definitions that use the same attribute are not correctly ordered. - The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead - to the wrong cos attribute value being applied to the entry. 
- """ - - log.info('Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly') - - # Add our nested branches - try: - topology.standalone.add_s(Entry((BRANCH1, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'level1' - }))) - except ldap.LDAPError as e: - log.error('Failed to add level1: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((BRANCH2, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'level2' - }))) - except ldap.LDAPError as e: - log.error('Failed to add level2: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((BRANCH3, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'level3' - }))) - except ldap.LDAPError as e: - log.error('Failed to add level3: error ' + e.message['desc']) - assert False - - # People branch, might already exist - try: - topology.standalone.add_s(Entry((BRANCH4, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'level4' - }))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.error('Failed to add level4: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((BRANCH5, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'level5' - }))) - except ldap.LDAPError as e: - log.error('Failed to add level5: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((BRANCH6, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'level6' - }))) - except ldap.LDAPError as e: - log.error('Failed to add level6: error ' + e.message['desc']) - assert False - - # Add users to each branch - try: - topology.standalone.add_s(Entry((USER1_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user1: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER2_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user2' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user2: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER3_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user3' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user3: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER4_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user4' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user4: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER5_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user5' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user5: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((USER6_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user6' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user6: error ' + e.message['desc']) - assert False - - # Enable password policy - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) - except ldap.LDAPError as e: - log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) - assert False - - # - # Add subtree policy to branch 1 - # - # Add the container - try: - topology.standalone.add_s(Entry((BRANCH1_CONTAINER, { - 'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - 
}))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for level1: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - topology.standalone.add_s(Entry((BRANCH1_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for level1: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - topology.standalone.add_s(Entry((BRANCH1_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH1_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template for level1: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - topology.standalone.add_s(Entry((BRANCH1_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', - 'costemplatedn': BRANCH1_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def for level1: error ' + e.message['desc']) - assert False - - # - # Add subtree policy to branch 2 - # - # Add the container - try: - topology.standalone.add_s(Entry((BRANCH2_CONTAINER, { - 'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - }))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for level2: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - topology.standalone.add_s(Entry((BRANCH2_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for level2: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - topology.standalone.add_s(Entry((BRANCH2_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH2_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template for level2: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - topology.standalone.add_s(Entry((BRANCH2_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', - 'costemplatedn': BRANCH2_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def for level2: error ' + e.message['desc']) - assert False - - # - # Add subtree policy to branch 3 - # - # Add the container - try: - topology.standalone.add_s(Entry((BRANCH3_CONTAINER, { - 'objectclass': 'top 
nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - }))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for level3: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - topology.standalone.add_s(Entry((BRANCH3_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for level3: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - topology.standalone.add_s(Entry((BRANCH3_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH3_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template for level3: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - topology.standalone.add_s(Entry((BRANCH3_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', - 'costemplatedn': BRANCH3_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def for level3: error ' + e.message['desc']) - assert False - - # - # Add subtree policy to branch 4 - # - # Add the container - try: - topology.standalone.add_s(Entry((BRANCH4_CONTAINER, { - 'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - }))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for level3: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - topology.standalone.add_s(Entry((BRANCH4_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for branch4: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - topology.standalone.add_s(Entry((BRANCH4_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH4_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template for level3: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - topology.standalone.add_s(Entry((BRANCH4_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', - 'costemplatedn': BRANCH4_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def for branch4: error ' + e.message['desc']) - assert False - - # - # Add subtree policy to branch 5 - # - # Add the container - try: - 
topology.standalone.add_s(Entry((BRANCH5_CONTAINER, { - 'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - }))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for branch5: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - topology.standalone.add_s(Entry((BRANCH5_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for branch5: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - topology.standalone.add_s(Entry((BRANCH5_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH5_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template for branch5: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - topology.standalone.add_s(Entry((BRANCH5_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', - 'costemplatedn': BRANCH5_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def for level3: error ' + e.message['desc']) - assert False - - # - # Add subtree policy to branch 6 - # - # Add the container - try: - topology.standalone.add_s(Entry((BRANCH6_CONTAINER, { - 'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - }))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for branch6: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - topology.standalone.add_s(Entry((BRANCH6_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for branch6: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - topology.standalone.add_s(Entry((BRANCH6_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH6_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template for branch6: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - topology.standalone.add_s(Entry((BRANCH6_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', - 'costemplatedn': BRANCH6_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def 
for branch6: error ' + e.message['desc']) - assert False - - time.sleep(2) - - # - # Now check that each user has its expected passwordPolicy subentry - # - try: - entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP): - log.fatal('User %s does not have expected pwdpolicysubentry!') - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.message['desc'])) - assert False - - try: - entries = topology.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP): - log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN) - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.message['desc'])) - assert False - - try: - entries = topology.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP): - log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN) - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.message['desc'])) - assert False - - try: - entries = topology.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP): - log.fatal('User %s does not have expected pwdpolicysubentry!' % USER4_DN) - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.message['desc'])) - assert False - - try: - entries = topology.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP): - log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN) - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.message['desc'])) - assert False - - try: - entries = topology.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP): - log.fatal('User %s does not have expected pwdpolicysubentry!' % USER6_DN) - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.message['desc'])) - assert False - - -def test_ticket47980_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47980(topo) - test_ticket47980_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py deleted file mode 100644 index b25d7dd..0000000 --- a/dirsrvtests/tickets/ticket47981_test.py +++ /dev/null @@ -1,295 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. 
-# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import ldap.sasl -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -BRANCH = 'ou=people,' + DEFAULT_SUFFIX -USER_DN = 'uid=user1,%s' % (BRANCH) -BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' -BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' -BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' -BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ - 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' -SECOND_SUFFIX = 'o=netscaperoot' -BE_NAME = 'netscaperoot' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
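[Editor's aside] Both ticket47980 and ticket47981 build their subtree password policies from the same four entries: a nsPwPolicyContainer, the passwordPolicy subentry, a CoS template carrying pwdpolicysubentry, and a CoS pointer definition. A condensed sketch of that layout, assuming a lib389 instance handle `inst` and deliberately simplified (unescaped) RDNs for readability; the deleted tests themselves use the DN-escaped forms defined in their BRANCH_* constants:

    from lib389 import Entry

    def add_subtree_pwpolicy(inst, branch='ou=people,dc=example,dc=com'):
        container = 'cn=nsPwPolicyContainer,%s' % branch
        pwp = 'cn=nsPwPolicyEntry,%s' % container      # simplified RDN, illustration only
        tmpl = 'cn=nsPwTemplateEntry,%s' % container   # simplified RDN, illustration only
        cos_def = 'cn=nsPwPolicy_CoS,%s' % branch

        # 1. Container that holds the policy entries
        inst.add_s(Entry((container, {
            'objectclass': 'top nsContainer'.split(),
            'cn': 'nsPwPolicyContainer'})))
        # 2. The password policy subentry itself
        inst.add_s(Entry((pwp, {
            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
            'cn': 'nsPwPolicyEntry',
            'passwordMustChange': 'off',
            'passwordStorageScheme': 'ssha'})))
        # 3. CoS template carrying the pwdpolicysubentry value
        inst.add_s(Entry((tmpl, {
            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
            'cn': 'nsPwTemplateEntry',
            'cosPriority': '1',
            'pwdpolicysubentry': pwp})))
        # 4. CoS pointer definition that applies the template to the whole branch
        inst.add_s(Entry((cos_def, {
            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
            'cn': 'nsPwPolicy_CoS',
            'costemplatedn': tmpl,
            'cosAttribute': 'pwdpolicysubentry default operational-default'})))

Once the pointer definition is in place, reading the operational pwdpolicysubentry attribute from any entry under the branch (as the ticket47980 checks do with search_s and hasValue) shows which policy CoS has applied.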
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def addSubtreePwPolicy(inst): - # - # Add subtree policy to the people branch - # - try: - inst.add_s(Entry((BRANCH_CONTAINER, { - 'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer' - }))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container for ou=people: error ' + e.message['desc']) - assert False - - # Add the password policy subentry - try: - inst.add_s(Entry((BRANCH_PWP, { - 'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'off', - 'passwordMinAge': '0', - 'passwordChange': 'off', - 'passwordStorageScheme': 'ssha' - }))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy: error ' + e.message['desc']) - assert False - - # Add the COS template - try: - inst.add_s(Entry((BRANCH_COS_TMPL, { - 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', - 'cosPriority': '1', - 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', - 'pwdpolicysubentry': BRANCH_PWP - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS template: error ' + e.message['desc']) - assert False - - # Add the COS definition - try: - inst.add_s(Entry((BRANCH_COS_DEF, { - 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', - 'costemplatedn': BRANCH_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default' - }))) - except ldap.LDAPError as e: - log.error('Failed to add COS def: error ' + e.message['desc']) - assert False - time.sleep(0.5) - - -def delSubtreePwPolicy(inst): - try: - inst.delete_s(BRANCH_COS_DEF) - except ldap.LDAPError as e: - log.error('Failed to delete COS def: error ' + e.message['desc']) - assert False - - try: - inst.delete_s(BRANCH_COS_TMPL) - except ldap.LDAPError as e: - log.error('Failed to delete COS template: error ' + e.message['desc']) - assert False - - try: - inst.delete_s(BRANCH_PWP) - except ldap.LDAPError as e: - log.error('Failed to delete COS password policy: error ' + e.message['desc']) - assert False - - try: - inst.delete_s(BRANCH_CONTAINER) - except ldap.LDAPError as e: - log.error('Failed to delete COS container: error ' + e.message['desc']) - assert False - time.sleep(0.5) - - -def test_ticket47981(topology): - """ - If there are multiple suffixes, and the last suffix checked does not contain any COS entries, - while other suffixes do, then the vattr cache is not invalidated as it should be. Then any - cached entries will still contain the old COS attributes/values. 
- """ - - log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users') - - # - # Create a second backend that does not have any COS entries - # - log.info('Adding second suffix that will not contain any COS entries...\n') - - topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME}) - topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME) - try: - topology.standalone.add_s(Entry((SECOND_SUFFIX, { - 'objectclass': 'top organization'.split(), - 'o': BE_NAME}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.error('Failed to create suffix entry: error ' + e.message['desc']) - assert False - - # - # Add People branch, it might already exist - # - log.info('Add our test entries to the default suffix, and proceed with the test...') - - try: - topology.standalone.add_s(Entry((BRANCH, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'level4' - }))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.error('Failed to add ou=people: error ' + e.message['desc']) - assert False - - # - # Add a user to the branch - # - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'uid': 'user1' - }))) - except ldap.LDAPError as e: - log.error('Failed to add user1: error ' + e.message['desc']) - assert False - - # - # Enable password policy and add the subtree policy - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) - except ldap.LDAPError as e: - log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) - assert False - - addSubtreePwPolicy(topology.standalone) - - # - # Now check the user has its expected passwordPolicy subentry - # - try: - entries = topology.standalone.search_s(USER_DN, - ldap.SCOPE_BASE, - '(objectclass=top)', - ['pwdpolicysubentry', 'dn']) - if not entries[0].hasAttr('pwdpolicysubentry'): - log.fatal('User does not have expected pwdpolicysubentry!') - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) - assert False - - # - # Delete the password policy and make sure it is removed from the same user - # - delSubtreePwPolicy(topology.standalone) - try: - entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if entries[0].hasAttr('pwdpolicysubentry'): - log.fatal('User unexpectedly does have the pwdpolicysubentry!') - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) - assert False - - # - # Add the subtree policvy back and see if the user now has it - # - addSubtreePwPolicy(topology.standalone) - try: - entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) - if not entries[0].hasAttr('pwdpolicysubentry'): - log.fatal('User does not have expected pwdpolicysubentry!') - assert False - except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) - assert False - - -def test_ticket47981_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket47981(topo) - test_ticket47981_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket47988_test.py b/dirsrvtests/tickets/ticket47988_test.py deleted file mode 100644 index db58e9d..0000000 --- a/dirsrvtests/tickets/ticket47988_test.py +++ /dev/null @@ -1,503 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -''' -Created on Nov 7, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import pytest -import tarfile -import stat -import shutil -from random import randint -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -# -# important part. We can deploy Master1 and Master2 on different versions -# -installation1_prefix = None -installation2_prefix = None - -TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX -OC_NAME = 'OCticket47988' -MUST = "(postalAddress $ postalCode)" -MAY = "(member $ street)" - -OTHER_NAME = 'other_entry' -MAX_OTHERS = 10 - -BIND_NAME = 'bind_entry' -BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) -BIND_PW = 'password' - -ENTRY_NAME = 'test_entry' -ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) -ENTRY_OC = "top person %s" % OC_NAME - -def _oc_definition(oid_ext, name, must=None, may=None): - oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext - desc = 'To test ticket 47490' - sup = 'person' - if not must: - must = MUST - if not may: - may = MAY - - new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) - return new_oc -class TopologyMaster1Master2(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to create a replicated topology for the 'module'. - The replicated topology is MASTER1 <-> Master2. 
- ''' - global installation1_prefix - global installation2_prefix - - #os.environ['USE_VALGRIND'] = '1' - - # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Args for the master1 instance - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_master = args_instance.copy() - master1.allocate(args_master) - - # allocate master1 on a given deployement - master2 = DirSrv(verbose=False) - if installation2_prefix: - args_instance[SER_DEPLOYED_DIR] = installation2_prefix - - # Args for the consumer instance - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_master = args_instance.copy() - master2.allocate(args_master) - - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - instance_master2 = master2.exists() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Here we have two instances master and consumer - return TopologyMaster1Master2(master1, master2) - - -def _header(topology, label): - topology.master1.log.info("\n\n###############################################") - topology.master1.log.info("#######") - topology.master1.log.info("####### %s" % label) - topology.master1.log.info("#######") - topology.master1.log.info("###################################################") - - -def _install_schema(server, tarFile): - server.stop(timeout=10) - - tmpSchema = '/tmp/schema_47988' - if not os.path.isdir(tmpSchema): - os.mkdir(tmpSchema) - - for the_file in os.listdir(tmpSchema): - file_path = os.path.join(tmpSchema, the_file) - if os.path.isfile(file_path): - 
os.unlink(file_path) - - os.chdir(tmpSchema) - tar = tarfile.open(tarFile, 'r:gz') - for member in tar.getmembers(): - tar.extract(member.name) - - tar.close() - - st = os.stat(server.schemadir) - os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR) - for the_file in os.listdir(tmpSchema): - schemaFile = os.path.join(server.schemadir, the_file) - if os.path.isfile(schemaFile): - if the_file.startswith('99user.ldif'): - # only replace 99user.ldif, the other standard definition are kept - os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR) - server.log.info("replace %s" % schemaFile) - shutil.copy(the_file, schemaFile) - - else: - server.log.info("add %s" % schemaFile) - shutil.copy(the_file, schemaFile) - os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP) - os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP) - - -def test_ticket47988_init(topology): - """ - It adds - - Objectclass with MAY 'member' - - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation - It deletes the anonymous aci - - """ - - _header(topology, 'test_ticket47988_init') - - # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL - topology.master1.modify_s(DN_CONFIG, mod) - topology.master2.modify_s(DN_CONFIG, mod) - - mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))] # Internal op - topology.master1.modify_s(DN_CONFIG, mod) - topology.master2.modify_s(DN_CONFIG, mod) - - # add dummy entries - for cpt in range(MAX_OTHERS): - name = "%s%d" % (OTHER_NAME, cpt) - topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - # check that entry 0 is replicated before - loop = 0 - entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - while loop <= 10: - try: - ent = topology.master2.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - assert (loop <= 10) - - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) - - #install the specific schema M1: ipa3.3, M2: ipa4.1 - schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz") - _install_schema(topology.master1, schema_file) - schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz") - _install_schema(topology.master2, schema_file) - - topology.master1.start(timeout=10) - topology.master2.start(timeout=10) - - -def _do_update_schema(server, range=3999): - ''' - Update the schema of the M2 (IPA4.1). 
to generate a nsSchemaCSN - ''' - postfix = str(randint(range, range + 1000)) - OID = '2.16.840.1.113730.3.8.12.%s' % postfix - NAME = 'thierry%s' % postfix - value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME) - mod = [(ldap.MOD_ADD, 'objectclasses', value)] - server.modify_s('cn=schema', mod) - - -def _do_update_entry(supplier=None, consumer=None, attempts=10): - ''' - This is doing an update on M2 (IPA4.1) and checks the update has been - propagated to M1 (IPA3.3) - ''' - assert(supplier) - assert(consumer) - entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - value = str(randint(100, 200)) - mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)] - supplier.modify_s(entryDN, mod) - - loop = 0 - while loop <= attempts: - ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) - read_val = ent.telephonenumber or "0" - if read_val == value: - break - # the expected value is not yet replicated. try again - time.sleep(5) - loop += 1 - supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value)) - assert (loop <= attempts) - - -def _pause_M2_to_M1(topology): - topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n") - ents = topology.master2.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master2.agreement.pause(ents[0].dn) - - -def _resume_M1_to_M2(topology): - topology.master1.log.info("\n\n######################### resume RA M1->M2 ######################\n") - ents = topology.master1.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master1.agreement.resume(ents[0].dn) - - -def _pause_M1_to_M2(topology): - topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n") - ents = topology.master1.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master1.agreement.pause(ents[0].dn) - - -def _resume_M2_to_M1(topology): - topology.master1.log.info("\n\n######################### resume RA M2->M1 ######################\n") - ents = topology.master2.agreement.list(suffix=SUFFIX) - assert len(ents) == 1 - topology.master2.agreement.resume(ents[0].dn) - - -def test_ticket47988_1(topology): - ''' - Check that replication is working and pause replication M2->M1 - ''' - _header(topology, 'test_ticket47988_1') - - topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n") - _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) - _pause_M2_to_M1(topology) - - -def test_ticket47988_2(topology): - ''' - Update M1 schema and trigger update M1->M2 - So M1 should learn new/extended definitions that are in M2 schema - ''' - _header(topology, 'test_ticket47988_2') - - topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n") - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\nBefore updating the schema on M1\n") - topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - - # Here M1 should no, should check M2 schema and learn - _do_update_schema(topology.master1) - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\nAfter updating the schema on M1\n") 
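[Editor's aside] The schema comparisons in these ticket47988 cases read nsSchemaCSN once on each master and assert on that snapshot. A small helper in the same spirit (a sketch only; it reuses lib389's schema.get_schema_csn() that the test already calls) that instead polls until the two masters converge:

    import time

    def wait_for_schema_csn_match(m1, m2, attempts=10, delay=5):
        csn1 = csn2 = None
        for _ in range(attempts):
            csn1 = m1.schema.get_schema_csn()
            csn2 = m2.schema.get_schema_csn()
            if csn1 and csn1 == csn2:
                return csn1
            # schema replication has not caught up yet, retry
            time.sleep(delay)
        raise AssertionError('nsSchemaCSN never converged: %s vs %s' % (csn1, csn2))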
- topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - assert (master1_schema_csn) - - # to avoid linger effect where a replication session is reused without checking the schema - _pause_M1_to_M2(topology) - _resume_M1_to_M2(topology) - - #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") - #time.sleep(60) - _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15) - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\nAfter a full replication session\n") - topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - assert (master1_schema_csn) - assert (master2_schema_csn) - - -def test_ticket47988_3(topology): - ''' - Resume replication M2->M1 and check replication is still working - ''' - _header(topology, 'test_ticket47988_3') - - _resume_M2_to_M1(topology) - _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5) - _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) - - -def test_ticket47988_4(topology): - ''' - Check schemaCSN is identical on both server - And save the nsschemaCSN to later check they do not change unexpectedly - ''' - _header(topology, 'test_ticket47988_4') - - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn) - assert (master1_schema_csn) - assert (master2_schema_csn) - assert (master1_schema_csn == master2_schema_csn) - - topology.master1.saved_schema_csn = master1_schema_csn - topology.master2.saved_schema_csn = master2_schema_csn - - -def test_ticket47988_5(topology): - ''' - Check schemaCSN do not change unexpectedly - ''' - _header(topology, 'test_ticket47988_5') - - _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5) - _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn) - assert (master1_schema_csn) - assert (master2_schema_csn) - assert (master1_schema_csn == master2_schema_csn) - - assert (topology.master1.saved_schema_csn == master1_schema_csn) - assert (topology.master2.saved_schema_csn == master2_schema_csn) - - -def test_ticket47988_6(topology): - ''' - Update M1 schema and trigger update M2->M1 - So M2 should learn new/extended definitions that are in M1 schema - ''' - - _header(topology, 'test_ticket47988_6') - - topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n") - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\nBefore updating the schema on M1\n") - topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - - # Here M1 should no, should check M2 schema 
and learn - _do_update_schema(topology.master1, range=5999) - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\nAfter updating the schema on M1\n") - topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - assert (master1_schema_csn) - - # to avoid linger effect where a replication session is reused without checking the schema - _pause_M1_to_M2(topology) - _resume_M1_to_M2(topology) - - #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") - #time.sleep(60) - _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15) - master1_schema_csn = topology.master1.schema.get_schema_csn() - master2_schema_csn = topology.master2.schema.get_schema_csn() - topology.master1.log.debug("\nAfter a full replication session\n") - topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) - topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - assert (master1_schema_csn) - assert (master2_schema_csn) - - -def test_ticket47988_final(topology): - topology.master1.delete() - topology.master2.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - global installation2_prefix - installation1_prefix = None - installation2_prefix = None - - topo = topology(True) - test_ticket47988_init(topo) - test_ticket47988_1(topo) - test_ticket47988_2(topo) - test_ticket47988_3(topo) - test_ticket47988_4(topo) - test_ticket47988_5(topo) - test_ticket47988_6(topo) - test_ticket47988_final(topo) - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48005_test.py b/dirsrvtests/tickets/ticket48005_test.py deleted file mode 100644 index b2a93e1..0000000 --- a/dirsrvtests/tickets/ticket48005_test.py +++ /dev/null @@ -1,415 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import re -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
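[Editor's aside] Every file in this hunk repeats the same standalone fixture and leaves the cleanup to a trailing test_ticketXXXXX_final() test. For reference, a sketch of the same fixture with the cleanup attached through pytest's request.addfinalizer() and the DirSrv handle returned directly instead of wrapped in TopologyStandalone (assumption: args_instance and the SER_*/HOST_*/PORT_* names come from the same lib389 star imports used above):

    import pytest
    from lib389 import DirSrv
    from lib389._constants import *
    from lib389.properties import *

    @pytest.fixture(scope="module")
    def topology(request):
        standalone = DirSrv(verbose=False)
        args_instance[SER_HOST] = HOST_STANDALONE
        args_instance[SER_PORT] = PORT_STANDALONE
        args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
        args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
        standalone.allocate(args_instance.copy())
        if standalone.exists():
            standalone.delete()
        standalone.create()
        standalone.open()

        def fin():
            # remove the instance once the module's tests are done
            standalone.delete()
        request.addfinalizer(fin)

        return standalone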
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48005_setup(topology): - ''' - allow dump core - generate a test ldif file using dbgen.pl - import the ldif - ''' - log.info("Ticket 48005 setup...") - if hasattr(topology.standalone, 'prefix'): - prefix = topology.standalone.prefix - else: - prefix = None - sysconfig_dirsrv = prefix + ENV_SYSCONFIG_DIR + "/dirsrv" - cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv - p = os.popen(cmdline, "r") - ulimitc = p.readline() - if ulimitc == "": - log.info('No ulimit -c in %s' % sysconfig_dirsrv) - log.info('Adding it') - cmdline = 'echo "ulimit -c unlimited" >> %s' % sysconfig_dirsrv - - sysconfig_dirsrv_systemd = sysconfig_dirsrv + ".systemd" - cmdline = 'egrep LimitCORE=infinity %s' % sysconfig_dirsrv_systemd - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore == "": - log.info('No LimitCORE in %s' % sysconfig_dirsrv_systemd) - log.info('Adding it') - cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd - - topology.standalone.restart(timeout=10) - - ldif_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket48005.ldif" - os.system('ls %s' % ldif_file) - os.system('rm -f %s' % ldif_file) - if hasattr(topology.standalone, 'prefix'): - prefix = topology.standalone.prefix - else: - prefix = None - dbgen_prog = prefix + '/bin/dbgen.pl' - log.info('dbgen_prog: %s' % dbgen_prog) - os.system('%s -s %s -o %s -u -n 10000' % (dbgen_prog, SUFFIX, ldif_file)) - cmdline = 'egrep dn: %s | wc -l' % ldif_file - p = os.popen(cmdline, "r") - dnnumstr = p.readline() - num = int(dnnumstr) - log.info("We have %d entries.\n", num) - - importTask = Tasks(topology.standalone) - args = {TASK_WAIT: True} - importTask.importLDIF(SUFFIX, None, ldif_file, args) - log.info('Importing %s complete.' % ldif_file) - - -def test_ticket48005_memberof(topology): - ''' - Enable memberof and referint plugin - Run fixmemberof task without waiting - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was successful. 
- ''' - log.info("Ticket 48005 memberof test...") - topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - - topology.standalone.restart(timeout=10) - - try: - # run the fixup task - topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False}) - except ValueError: - log.error('Some problem occured with a value that was provided') - assert False - - topology.standalone.stop(timeout=10) - - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - s.system('mv %score* %s/core.ticket48005_memberof' % (logdir, mytmp)) - log.error('FixMemberof: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) - - topology.standalone.restart(timeout=10) - - log.info("Ticket 48005 memberof test complete") - - -def test_ticket48005_automember(topology): - ''' - Enable automember and referint plugin - 1. Run automember rebuild membership task without waiting - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was successful. - 2. Run automember export updates task without waiting - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was successful. - 3. Run automember map updates task without waiting - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was successful. 
- ''' - log.info("Ticket 48005 automember test...") - topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) - topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) - - # configure automember config entry - log.info('Adding automember config') - try: - topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { - 'objectclass': 'top autoMemberDefinition'.split(), - 'autoMemberScope': 'dc=example,dc=com', - 'autoMemberFilter': 'objectclass=inetorgperson', - 'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com', - 'autoMemberGroupingAttr': 'uniquemember:dn', - 'cn': 'group cfg'}))) - except ValueError: - log.error('Failed to add automember config') - assert False - - topology.standalone.restart(timeout=10) - - try: - # run the automember rebuild task - topology.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False}) - except ValueError: - log.error('Automember rebuild task failed.') - assert False - - topology.standalone.stop(timeout=10) - - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - s.system('mv %score* %s/core.ticket48005_automember_rebuild' % (logdir, mytmp)) - log.error('Automember_rebuld: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - ldif_out_file = mytmp + "/ticket48005_automember_exported.ldif" - try: - # run the automember export task - topology.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False}) - except ValueError: - log.error('Automember Export task failed.') - assert False - - topology.standalone.stop(timeout=10) - - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - s.system('mv %score* %s/core.ticket48005_automember_export' % (logdir, mytmp)) - log.error('Automember_export: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - ldif_in_file = topology.standalone.getDir(__file__, DATA_DIR) + "ticket48005.ldif" - ldif_out_file = mytmp + "/ticket48005_automember_map.ldif" - try: - # run the automember map task - topology.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file, args={TASK_WAIT: False}) - except ValueError: - log.error('Automember Map task failed.') - assert False - - topology.standalone.stop(timeout=10) - - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - s.system('mv %score* %s/core.ticket48005_automember_map' % (logdir, mytmp)) - log.error('Automember_map: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) - topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) - - topology.standalone.restart(timeout=10) - - log.info("Ticket 48005 automember test complete") - - -def test_ticket48005_syntaxvalidate(topology): - ''' - Run syntax validate task without waiting - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was 
successful. - ''' - log.info("Ticket 48005 syntax validate test...") - - try: - # run the fixup task - topology.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False}) - except ValueError: - log.error('Some problem occured with a value that was provided') - assert False - - topology.standalone.stop(timeout=10) - - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - s.system('mv %score* %s/core.ticket48005_syntaxvalidate' % (logdir, mytmp)) - log.error('SyntaxValidate: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - log.info("Ticket 48005 syntax validate test complete") - - -def test_ticket48005_usn(topology): - ''' - Enable entryusn - Delete all user entries. - Run USN tombstone cleanup task - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was successful. - ''' - log.info("Ticket 48005 usn test...") - topology.standalone.plugins.enable(name=PLUGIN_USN) - - topology.standalone.restart(timeout=10) - - try: - entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)") - if len(entries) == 0: - log.info("No user entries.") - else: - for i in range(len(entries)): - # log.info('Deleting %s' % entries[i].dn) - try: - topology.standalone.delete_s(entries[i].dn) - except ValueError: - log.error('delete_s %s failed.' % entries[i].dn) - assert False - except ValueError: - log.error('search_s failed.') - assert False - - try: - # run the usn tombstone cleanup - topology.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False}) - except ValueError: - log.error('Some problem occured with a value that was provided') - assert False - - topology.standalone.stop(timeout=10) - - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - s.system('mv %score* %s/core.ticket48005_usn' % (logdir, mytmp)) - log.error('usnTombstoneCleanup: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - topology.standalone.plugins.disable(name=PLUGIN_USN) - - topology.standalone.restart(timeout=10) - - log.info("Ticket 48005 usn test complete") - - -def test_ticket48005_schemareload(topology): - ''' - Run schema reload task without waiting - Shutdown the server - Check if a core file was generated or not - If no core was found, this test case was successful. 
- ''' - log.info("Ticket 48005 schema reload test...") - - try: - # run the schema reload task - topology.standalone.tasks.schemaReload(args={TASK_WAIT: False}) - except ValueError: - log.error('Schema Reload task failed.') - assert False - - topology.standalone.stop(timeout=10) - - logdir = re.sub('errors', '', topology.standalone.errlog) - cmdline = 'ls ' + logdir + 'core*' - p = os.popen(cmdline, "r") - lcore = p.readline() - if lcore != "": - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - s.system('mv %score* %s/core.ticket48005_schema_reload' % (logdir, mytmp)) - log.error('Schema reload: Moved core file(s) to %s; Test failed' % mytmp) - assert False - log.info('No core files are found') - - topology.standalone.start(timeout=10) - - log.info("Ticket 48005 schema reload test complete") - - -def test_ticket48005_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48005_setup(topo) - test_ticket48005_memberof(topo) - test_ticket48005_automember(topo) - test_ticket48005_syntaxvalidate(topo) - test_ticket48005_usn(topo) - test_ticket48005_schemareload(topo) - test_ticket48005_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48013_test.py b/dirsrvtests/tickets/ticket48013_test.py deleted file mode 100644 index 0ccdeba..0000000 --- a/dirsrvtests/tickets/ticket48013_test.py +++ /dev/null @@ -1,134 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -import pyasn1 -import pyasn1_modules -import ldap,ldapurl -from ldap.ldapobject import SimpleLDAPObject -from ldap.syncrepl import SyncreplConsumer -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -class SyncObject(SimpleLDAPObject, SyncreplConsumer): - def __init__(self, uri): - # Init the ldap connection - SimpleLDAPObject.__init__(self, uri) - - def sync_search(self, test_cookie): - self.syncrepl_search('dc=example,dc=com', ldap.SCOPE_SUBTREE, - filterstr='(objectclass=*)', mode='refreshOnly', - cookie=test_cookie) - - def poll(self): - self.syncrepl_poll(all=1) - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48013(topology): - ''' - Content Synchonization: Test that invalid cookies are caught - ''' - - cookies = ('#', '##', 'a#a#a', 'a#a#1') - - # Enable dynamic plugins - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - ldap.error('Failed to enable dynamic plugin!' + e.message['desc']) - assert False - - # Enable retro changelog - topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) - - # Enbale content sync plugin - topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) - - # Set everything up - ldap_url = ldapurl.LDAPUrl('ldap://localhost:31389') - ldap_connection = SyncObject(ldap_url.initializeUrl()) - - # Authenticate - try: - ldap_connection.simple_bind_s(DN_DM, PASSWORD) - except ldap.LDAPError as e: - print('Login to LDAP server failed: %s' % e.message['desc']) - assert False - - # Test invalid cookies - for invalid_cookie in cookies: - log.info('Testing cookie: %s' % invalid_cookie) - try: - ldap_connection.sync_search(invalid_cookie) - ldap_connection.poll() - log.fatal('Invalid cookie accepted!') - assert False - except Exception as e: - log.info('Invalid cookie correctly rejected: %s' % e.message['info']) - pass - - # Success - log.info('Test complete') - - -def test_ticket48013_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48013(topo) - test_ticket48013_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48026_test.py b/dirsrvtests/tickets/ticket48026_test.py deleted file mode 100644 index f8d440f..0000000 --- a/dirsrvtests/tickets/ticket48026_test.py +++ /dev/null @@ -1,168 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX -USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
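Editor's note: as a counterpart to the invalid-cookie loop in test_ticket48013 above, the sketch below shows the SyncObject class from that test driven with a well-formed request — a refreshOnly sync started with no cookie, after which the server hands back a fresh one. The URL and bind credentials are placeholders, not values taken from the test.

import ldap

# SyncObject is the SimpleLDAPObject/SyncreplConsumer subclass defined in
# ticket48013_test.py above; connection details here are illustrative only.
conn = SyncObject('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# cookie=None asks for a full refresh; the server returns a new cookie in
# the Sync Done control, which syncrepl_poll() processes for us.
conn.sync_search(test_cookie=None)
conn.poll()
conn.unbind_s()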
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48026(topology): - ''' - Test that multiple attribute uniqueness works correctly. - ''' - # Configure the plugin - inst = topology.standalone - inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) - - try: - # This plugin enable / disable doesn't seem to create the nsslapd-pluginId correctly? - inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'), - (ldap.MOD_ADD, 'uniqueness-attribute-name', - 'mailAlternateAddress'), - ]) - except ldap.LDAPError as e: - log.fatal('test_ticket48026: Failed to configure plugin for "mail": error ' + e.message['desc']) - assert False - - inst.restart(timeout=30) - - # Add an entry - try: - inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'mail': 'user1@example.com', - 'mailAlternateAddress' : 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error ' + e.message['desc']) - assert False - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_ticket48026: Adding of 1st entry(mail v mail) incorrectly succeeded') - assert False - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mailAlternateAddress': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded') - assert False - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mail': 'user1@alt.example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_ticket48026: Adding of 3rd entry(mail v mailAlternateAddress) incorrectly succeeded') - assert False - - try: - inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'mailAlternateAddress': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.CONSTRAINT_VIOLATION: - pass - else: - log.error('test_ticket48026: Adding of 4th entry(mailAlternateAddress v mail) incorrectly succeeded') - assert False - - log.info('Test complete') - - -def test_ticket48026_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48026(topo) - test_ticket48026_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git 
a/dirsrvtests/tickets/ticket48109_test.py b/dirsrvtests/tickets/ticket48109_test.py deleted file mode 100644 index e4091e0..0000000 --- a/dirsrvtests/tickets/ticket48109_test.py +++ /dev/null @@ -1,394 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48109_0(topology): - ''' - Set SubStr lengths to cn=uid,cn=index,... 
- objectClass: extensibleObject - nsIndexType: sub - nsSubStrBegin: 2 - nsSubStrEnd: 2 - ''' - log.info('Test case 0') - # add substr setting to UID_INDEX - try: - topology.standalone.modify_s(UID_INDEX, - [(ldap.MOD_ADD, 'objectClass', 'extensibleObject'), - (ldap.MOD_ADD, 'nsIndexType', 'sub'), - (ldap.MOD_ADD, 'nsSubStrBegin', '2'), - (ldap.MOD_ADD, 'nsSubStrEnd', '2')]) - except ldap.LDAPError as e: - log.error('Failed to add substr lengths: error ' + e.message['desc']) - assert False - - # restart the server to apply the indexing - topology.standalone.restart(timeout=10) - - # add a test user - UID = 'auser0' - USER_DN = 'uid=%s,%s' % (UID, SUFFIX) - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), - 'cn': 'a user0', - 'sn': 'user0', - 'givenname': 'a', - 'mail': UID}))) - except ldap.LDAPError as e: - log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc']) - assert False - - entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)') - assert len(entries) == 1 - - # restart the server to check the access log - topology.standalone.restart(timeout=10) - - cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l0 = p.readline() - if l0 == "": - log.error('Search with "(uid=a*)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') - regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') - match = regex.match(l0) - log.info('match: %s' % match.group(1)) - cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l1 = p.readline() - if l1 == "": - log.error('Search result of "(uid=a*)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - log.info('l1: %s' % l1) - regex = re.compile(r'.*nentries=(\d+)\s+.*') - match = regex.match(l1) - log.info('match: nentires=%s' % match.group(1)) - if match.group(1) == "0": - log.error('Entry uid=a* not found.') - assert False - else: - log.info('Entry uid=a* found.') - regex = re.compile(r'.*(notes=[AU]).*') - match = regex.match(l1) - if match: - log.error('%s - substr index was not used' % match.group(1)) - assert False - else: - log.info('Test case 0 - OK - substr index used') - - # clean up substr setting to UID_INDEX - try: - topology.standalone.modify_s(UID_INDEX, - [(ldap.MOD_DELETE, 'objectClass', 'extensibleObject'), - (ldap.MOD_DELETE, 'nsIndexType', 'sub'), - (ldap.MOD_DELETE, 'nsSubStrBegin', '2'), - (ldap.MOD_DELETE, 'nsSubStrEnd', '2')]) - except ldap.LDAPError as e: - log.error('Failed to delete substr lengths: error ' + e.message['desc']) - assert False - - -def test_ticket48109_1(topology): - ''' - Set SubStr lengths to cn=uid,cn=index,... 
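Editor's note: test case 0 above, and cases 1 and 2 below, repeat the same shell pipeline — egrep the access log for the SRCH line, extract conn=N op=N, find the matching RESULT line, then look at nentries= and any notes=A / notes=U flag (the markers the access log uses for unindexed or partially indexed evaluations). A hedged pure-Python version of that scrape; the helper name and return shape are mine, not lib389's.

import re


def scan_access_log(accesslog_path, filter_text):
    """Return (nentries, notes) for the first SRCH line mentioning filter_text.

    Assumes the usual 'conn=N op=N SRCH ...' / 'conn=N op=N RESULT ...
    nentries=N notes=X' access log layout these tests rely on.
    """
    with open(accesslog_path) as logf:
        lines = logf.readlines()

    srch = re.compile(r'.*\s(conn=\d+ op=\d+)\s+SRCH\s')
    key = None
    for line in lines:
        if filter_text in line:
            m = srch.match(line)
            if m:
                key = m.group(1)
                break
    if key is None:
        return None, None

    for line in lines:
        if key in line and ' RESULT ' in line:
            nentries = re.search(r'nentries=(\d+)', line)
            notes = re.search(r'notes=([AU])', line)
            return (int(nentries.group(1)) if nentries else None,
                    notes.group(1) if notes else None)
    return None, None

For example:
    nentries, notes = scan_access_log(topology.standalone.accesslog, 'uid=a*')
    assert nentries and notes is None    # entries found, substr index used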
- nsIndexType: sub - nsMatchingRule: nsSubStrBegin=2 - nsMatchingRule: nsSubStrEnd=2 - ''' - log.info('Test case 1') - # add substr setting to UID_INDEX - try: - topology.standalone.modify_s(UID_INDEX, - [(ldap.MOD_ADD, 'nsIndexType', 'sub'), - (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'), - (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2')]) - except ldap.LDAPError as e: - log.error('Failed to add substr lengths: error ' + e.message['desc']) - assert False - - # restart the server to apply the indexing - topology.standalone.restart(timeout=10) - - # add a test user - UID = 'buser1' - USER_DN = 'uid=%s,%s' % (UID, SUFFIX) - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), - 'cn': 'b user1', - 'sn': 'user1', - 'givenname': 'b', - 'mail': UID}))) - except ldap.LDAPError as e: - log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc']) - assert False - - entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)') - assert len(entries) == 1 - - # restart the server to check the access log - topology.standalone.restart(timeout=10) - - cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l0 = p.readline() - if l0 == "": - log.error('Search with "(uid=b*)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') - regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') - match = regex.match(l0) - log.info('match: %s' % match.group(1)) - cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l1 = p.readline() - if l1 == "": - log.error('Search result of "(uid=*b)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - log.info('l1: %s' % l1) - regex = re.compile(r'.*nentries=(\d+)\s+.*') - match = regex.match(l1) - log.info('match: nentires=%s' % match.group(1)) - if match.group(1) == "0": - log.error('Entry uid=*b not found.') - assert False - else: - log.info('Entry uid=*b found.') - regex = re.compile(r'.*(notes=[AU]).*') - match = regex.match(l1) - if match: - log.error('%s - substr index was not used' % match.group(1)) - assert False - else: - log.info('Test case 1 - OK - substr index used') - - # clean up substr setting to UID_INDEX - try: - topology.standalone.modify_s(UID_INDEX, - [(ldap.MOD_DELETE, 'nsIndexType', 'sub'), - (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=2'), - (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=2')]) - except ldap.LDAPError as e: - log.error('Failed to delete substr lengths: error ' + e.message['desc']) - assert False - - -def test_ticket48109_2(topology): - ''' - Set SubStr conflict formats/lengths to cn=uid,cn=index,... - objectClass: extensibleObject - nsIndexType: sub - nsMatchingRule: nsSubStrBegin=3 - nsMatchingRule: nsSubStrEnd=3 - nsSubStrBegin: 2 - nsSubStrEnd: 2 - nsSubStr{Begin,End} are honored. 
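Editor's note: taken together, the three ticket48109 cases exercise two ways of declaring substring key lengths on the uid index, plus their combination — plain nsSubStrBegin/nsSubStrEnd attributes (which require extensibleObject on the index entry) and the nsMatchingRule: nssubstrbegin=N / nssubstrend=N form; in test case 2, where both are present with different values, the nsSubStr* attributes are expected to win. A condensed sketch of the two modify batches, with the DN and values copied from the tests:

import ldap

UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'

# Form 1: explicit attributes (needs extensibleObject on the index entry)
attr_form = [
    (ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
    (ldap.MOD_ADD, 'nsIndexType', 'sub'),
    (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
    (ldap.MOD_ADD, 'nsSubStrEnd', '2'),
]

# Form 2: matching-rule style values
mr_form = [
    (ldap.MOD_ADD, 'nsIndexType', 'sub'),
    (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'),
    (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2'),
]

# conn.modify_s(UID_INDEX, attr_form)   # restart afterwards to rebuild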
- ''' - log.info('Test case 2') - - # add substr setting to UID_INDEX - try: - topology.standalone.modify_s(UID_INDEX, - [(ldap.MOD_ADD, 'nsIndexType', 'sub'), - (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=3'), - (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=3'), - (ldap.MOD_ADD, 'objectClass', 'extensibleObject'), - (ldap.MOD_ADD, 'nsSubStrBegin', '2'), - (ldap.MOD_ADD, 'nsSubStrEnd', '2')]) - except ldap.LDAPError as e: - log.error('Failed to add substr lengths: error ' + e.message['desc']) - assert False - - # restart the server to apply the indexing - topology.standalone.restart(timeout=10) - - # add a test user - UID = 'cuser2' - USER_DN = 'uid=%s,%s' % (UID, SUFFIX) - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), - 'cn': 'c user2', - 'sn': 'user2', - 'givenname': 'c', - 'mail': UID}))) - except ldap.LDAPError as e: - log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc']) - assert False - - entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)') - assert len(entries) == 1 - - entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)') - assert len(entries) == 1 - - # restart the server to check the access log - topology.standalone.restart(timeout=10) - - cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l0 = p.readline() - if l0 == "": - log.error('Search with "(uid=c*)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') - regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') - match = regex.match(l0) - log.info('match: %s' % match.group(1)) - cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l1 = p.readline() - if l1 == "": - log.error('Search result of "(uid=c*)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - log.info('l1: %s' % l1) - regex = re.compile(r'.*nentries=(\d+)\s+.*') - match = regex.match(l1) - log.info('match: nentires=%s' % match.group(1)) - if match.group(1) == "0": - log.error('Entry uid=c* not found.') - assert False - else: - log.info('Entry uid=c* found.') - regex = re.compile(r'.*(notes=[AU]).*') - match = regex.match(l1) - if match: - log.error('%s - substr index was not used' % match.group(1)) - assert False - else: - log.info('Test case 2-1 - OK - correct substr index used') - - cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l0 = p.readline() - if l0 == "": - log.error('Search with "(uid=*2)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') - regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') - match = regex.match(l0) - log.info('match: %s' % match.group(1)) - cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog) - p = os.popen(cmdline, "r") - l1 = p.readline() - if l1 == "": - log.error('Search result of "(uid=*2)" is not logged in ' + topology.standalone.accesslog) - assert False - else: - log.info('l1: %s' % l1) - regex = re.compile(r'.*nentries=(\d+)\s+.*') - match = regex.match(l1) - log.info('match: nentires=%s' % match.group(1)) - if match.group(1) == "0": - log.error('Entry uid=*2 not found.') - assert False - else: - log.info('Entry uid=*2 found.') - regex = 
re.compile(r'.*(notes=[AU]).*') - match = regex.match(l1) - if match: - log.error('%s - substr index was not used' % match.group(1)) - assert False - else: - log.info('Test case 2-2 - OK - correct substr index used') - - # clean up substr setting to UID_INDEX - try: - topology.standalone.modify_s(UID_INDEX, - [(ldap.MOD_DELETE, 'nsIndexType', 'sub'), - (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=3'), - (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=3'), - (ldap.MOD_DELETE, 'objectClass', 'extensibleObject'), - (ldap.MOD_DELETE, 'nsSubStrBegin', '2'), - (ldap.MOD_DELETE, 'nsSubStrEnd', '2')]) - except ldap.LDAPError as e: - log.error('Failed to delete substr lengths: error ' + e.message['desc']) - assert False - - log.info('Test complete') - - -def test_ticket48109_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48109_0(topo) - test_ticket48109_1(topo) - test_ticket48109_2(topo) - test_ticket48109_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48170_test.py b/dirsrvtests/tickets/ticket48170_test.py deleted file mode 100644 index cc71e37..0000000 --- a/dirsrvtests/tickets/ticket48170_test.py +++ /dev/null @@ -1,96 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48170(topology): - ''' - Attempt to add a nsIndexType wikth an invalid value: "eq,pres" - ''' - - INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' - REJECTED = False - try: - topology.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', 'eq,pres')]) - except ldap.UNWILLING_TO_PERFORM: - log.info('Index update correctly rejected') - REJECTED = True - - if not REJECTED: - log.fatal('Invalid nsIndexType value was incorrectly accepted.') - assert False - - log.info('Test complete') - - -def test_ticket48170_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_ticket48170(topo) - test_ticket48170_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48191_test.py b/dirsrvtests/tickets/ticket48191_test.py deleted file mode 100644 index 000975a..0000000 --- a/dirsrvtests/tickets/ticket48191_test.py +++ /dev/null @@ -1,323 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl -from ldap.controls.simple import GetEffectiveRightsControl - -log = logging.getLogger(__name__) - -installation_prefix = None - -CONFIG_DN = 'cn=config' -MYSUFFIX = 'o=ticket48191.org' -MYSUFFIXBE = 'ticket48191' - -_MYLDIF = 'ticket48191.ldif' - -SEARCHFILTER = '(objectclass=*)' - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
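Editor's note: the ticket48170 check above records the rejection through a REJECTED flag; the same assertion can be written more compactly with pytest.raises, sketched below (not a change to the test itself). The mixed-case 'nsINdexType' in the original is harmless, since LDAP attribute names are case-insensitive.

import ldap
import pytest

INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'


def assert_invalid_index_type_rejected(conn):
    # 'eq,pres' is one malformed value; the server is expected to refuse it
    # with UNWILLING_TO_PERFORM rather than silently split it in two.
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        conn.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsIndexType', 'eq,pres')])

For example: assert_invalid_index_type_rejected(topology.standalone)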
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def test_ticket48191_setup(topology): - """ - Import 20 entries - Set nsslapd-maxsimplepaged-per-conn in cn=config - If the val is negative, no limit. - If the value is 0, the simple paged results is disabled. - If the value is positive, the value is the max simple paged results requests per connection. - The setting has to be dynamic. - """ - log.info('Testing Ticket 48191 - Config parameter nsslapd-maxsimplepaged-per-conn') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket48191.org ######################\n") - - topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) - topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) - - topology.standalone.log.info("\n\n######################### Generate Test data ######################\n") - - # get tmp dir - mytmp = topology.standalone.getDir(__file__, TMP_DIR) - if mytmp is None: - mytmp = "/tmp" - - MYLDIF = '%s%s' % (mytmp, _MYLDIF) - os.system('ls %s' % MYLDIF) - os.system('rm -f %s' % MYLDIF) - if hasattr(topology.standalone, 'prefix'): - prefix = topology.standalone.prefix - else: - prefix = None - dbgen_prog = prefix + '/bin/dbgen.pl' - topology.standalone.log.info('dbgen_prog: %s' % dbgen_prog) - os.system('%s -s %s -o %s -n 14' % (dbgen_prog, MYSUFFIX, MYLDIF)) - cmdline = 'egrep dn: %s | wc -l' % MYLDIF - p = os.popen(cmdline, "r") - dnnumstr = p.readline() - global dnnum - dnnum = int(dnnumstr) - topology.standalone.log.info("We have %d entries.\n", dnnum) - - topology.standalone.log.info("\n\n######################### Import Test data ######################\n") - - args = {TASK_WAIT: True} - importTask = Tasks(topology.standalone) - importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args) - - topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") - topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - global entries - entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) - topology.standalone.log.info("Returned %d entries.\n", len(entries)) - - #print entries - - assert dnnum == len(entries) - - topology.standalone.log.info('%d entries are successfully imported.' 
% dnnum) - - -def test_ticket48191_run_0(topology): - topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (no nsslapd-maxsimplepaged-per-conn) ######################\n") - - page_size = 4 - spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') - - known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType: SimplePagedResultsControl, - } - - topology.standalone.log.info("Calling search_ext...") - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl]) - pageddncnt = 0 - pages = 0 - while True: - pages += 1 - - topology.standalone.log.info("Getting page %d" % pages) - rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) - topology.standalone.log.info("%d results" % len(rdata)) - pageddncnt += len(rdata) - - topology.standalone.log.info("Results:") - for dn, attrs in rdata: - topology.standalone.log.info("dn: %s" % dn) - - pctrls = [ - c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType - ] - if not pctrls: - topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') - break - - if pctrls[0].cookie: - spr_req_ctrl.cookie = pctrls[0].cookie - topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl]) - else: - topology.standalone.log.info("No cookie") - break - - topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) - - global dnnum - global entries - assert dnnum == len(entries) - assert pages == (dnnum / page_size) - - -def test_ticket48191_run_1(topology): - topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (nsslapd-maxsimplepaged-per-conn: 0) ######################\n") - - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '0')]) - - page_size = 4 - spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') - - known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType: SimplePagedResultsControl, - } - - topology.standalone.log.info("Calling search_ext...") - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl]) - - topology.standalone.log.fatal('Unexpected success') - try: - rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) - except ldap.UNWILLING_TO_PERFORM as e: - topology.standalone.log.info('Returned the expected RC UNWILLING_TO_PERFORM') - return - except ldap.LDAPError as e: - topology.standalone.log.fatal('Unexpected error: ' + e.message['desc']) - assert False - topology.standalone.log.info("Type %d" % rtype) - topology.standalone.log.info("%d results" % len(rdata)) - assert False - - -def test_ticket48191_run_2(topology): - topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL (nsslapd-maxsimplepaged-per-conn: 1000) ######################\n") - - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-maxsimplepaged-per-conn', '1000')]) - - page_size = 4 - spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') - - known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType: SimplePagedResultsControl, - } - - 
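Editor's note: the cookie loop in test_ticket48191_run_0 above is repeated verbatim in run_2 below; it could be factored into a small helper along the lines sketched here (illustrative names, not lib389 API). The sketch also makes the page-count arithmetic explicit — pages == dnnum / page_size only holds when dnnum is an exact multiple of page_size, so the general expectation is the ceiling of that division.

import math

import ldap
from ldap.controls import SimplePagedResultsControl


def paged_search(conn, base, filterstr, attrs, page_size):
    """Run a simple paged results search and return (entries, pages).

    Mirrors the loop used by the ticket48191 runs above and below.
    """
    ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
    known = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
    entries, pages = [], 0
    msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr, attrs,
                            serverctrls=[ctrl])
    while True:
        pages += 1
        _, rdata, _, rctrls = conn.result3(msgid, resp_ctrl_classes=known)
        entries.extend(rdata)
        pctrls = [c for c in rctrls
                  if c.controlType == SimplePagedResultsControl.controlType]
        if not pctrls or not pctrls[0].cookie:
            break
        ctrl.cookie = pctrls[0].cookie
        msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr, attrs,
                                serverctrls=[ctrl])
    return entries, pages

Expected page count for a complete result set:
    expected_pages = int(math.ceil(len(entries) / float(page_size)))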
topology.standalone.log.info("Calling search_ext...") - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl]) - pageddncnt = 0 - pages = 0 - while True: - pages += 1 - - topology.standalone.log.info("Getting page %d" % pages) - rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) - topology.standalone.log.info("%d results" % len(rdata)) - pageddncnt += len(rdata) - - topology.standalone.log.info("Results:") - for dn, attrs in rdata: - topology.standalone.log.info("dn: %s" % dn) - - pctrls = [ - c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType - ] - if not pctrls: - topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') - break - - if pctrls[0].cookie: - spr_req_ctrl.cookie = pctrls[0].cookie - topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) - msgid = topology.standalone.search_ext(MYSUFFIX, - ldap.SCOPE_SUBTREE, - SEARCHFILTER, - ['cn'], - serverctrls=[spr_req_ctrl]) - else: - topology.standalone.log.info("No cookie") - break - - topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages) - - global dnnum - global entries - assert dnnum == len(entries) - assert pages == (dnnum / page_size) - - topology.standalone.log.info("ticket48191 was successfully verified.") - - -def test_ticket48191_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48191_setup(topo) - test_ticket48191_run_0(topo) - test_ticket48191_run_1(topo) - test_ticket48191_run_2(topo) - test_ticket48191_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48194_test.py b/dirsrvtests/tickets/ticket48194_test.py deleted file mode 100644 index 17e179a..0000000 --- a/dirsrvtests/tickets/ticket48194_test.py +++ /dev/null @@ -1,499 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import subprocess -import time -import ldap -import logging -import pytest -import shutil -from lib389 import DirSrv, Entry, tools -from lib389 import DirSrvTools -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -CONFIG_DN = 'cn=config' -ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN -RSA = 'RSA' -RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) -LDAPSPORT = '10636' -SERVERCERT = 'Server-Cert' -plus_all_ecount = 0 -plus_all_dcount = 0 -plus_all_ecount_noweak = 0 -plus_all_dcount_noweak = 0 - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def _header(topology, label): - topology.standalone.log.info("\n\n###############################################") - topology.standalone.log.info("####### %s" % label) - topology.standalone.log.info("###############################################") - - -def test_ticket48194_init(topology): - """ - Generate self signed cert and import it to the DS cert db. - Enable SSL - """ - _header(topology, 'Testing Ticket 48194 - harden the list of ciphers available by default') - - conf_dir = topology.standalone.confdir - - log.info("\n######################### Checking existing certs ######################\n") - os.system('certutil -L -d %s -n "CA certificate"' % conf_dir) - os.system('certutil -L -d %s -n "%s"' % (conf_dir, SERVERCERT)) - - log.info("\n######################### Create a password file ######################\n") - pwdfile = '%s/pwdfile.txt' % (conf_dir) - opasswd = os.popen("(ps -ef ; w ) | sha1sum | awk '{print $1}'", "r") - passwd = opasswd.readline() - pwdfd = open(pwdfile, "w") - pwdfd.write(passwd) - pwdfd.close() - - log.info("\n######################### Create a noise file ######################\n") - noisefile = '%s/noise.txt' % (conf_dir) - noise = os.popen("(w ; ps -ef ; date ) | sha1sum | awk '{print $1}'", "r") - noisewdfd = open(noisefile, "w") - noisewdfd.write(noise.readline()) - noisewdfd.close() - - log.info("\n######################### Create key3.db and cert8.db database ######################\n") - os.system("ls %s" % pwdfile) - os.system("cat %s" % pwdfile) - os.system('certutil -N -d %s -f %s' % (conf_dir, pwdfile)) - - log.info("\n######################### Creating encryption key for CA ######################\n") - os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile)) - - log.info("\n######################### Creating self-signed CA certificate ######################\n") - os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile)) - - log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n") - cafile = '%s/cacert.asc' % conf_dir - catxt = os.popen('certutil -L -d %s -n "CA certificate" -a' % conf_dir) - cafd = open(cafile, "w") - while True: - line = catxt.readline() - if (line == ''): - break - cafd.write(line) - cafd.close() - - log.info("\n######################### Generate the server certificate ######################\n") - ohostname = os.popen('hostname --fqdn', "r") - myhostname = ohostname.readline() - os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA 
certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile)) - - log.info("\n######################### create the pin file ######################\n") - pinfile = '%s/pin.txt' % (conf_dir) - pintxt = 'Internal (Software) Token:%s' % passwd - pinfd = open(pinfile, "w") - pinfd.write(pintxt) - pinfd.close() - - log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), - (ldap.MOD_REPLACE, 'nsTLS1', 'on'), - (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'), - (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'), - (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')]) - - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'), - (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'), - (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)]) - - topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), - 'cn': RSA, - 'nsSSLPersonalitySSL': SERVERCERT, - 'nsSSLToken': 'internal (software)', - 'nsSSLActivation': 'on'}))) - -def connectWithOpenssl(topology, cipher, expect): - """ - Connect with the given cipher - Condition: - If expect is True, the handshake should be successful. - If expect is False, the handshake should be refused with - access log: "Cannot communicate securely with peer: - no common encryption algorithm(s)." - """ - log.info("Testing %s -- expect to handshake %s", cipher,"successfully" if expect else "failed") - - myurl = 'localhost:%s' % LDAPSPORT - cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher] - - strcmdline = '/usr/bin/openssl s_client -connect localhost:%s -cipher %s' % (LDAPSPORT, cipher) - log.info("Running cmdline: %s", strcmdline) - - try: - proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) - except ValueError: - log.info("%s failed: %s", cmdline, ValueError) - proc.kill() - - while True: - l = proc.stdout.readline() - if l == "": - break - if 'Cipher is' in l: - log.info("Found: %s", l) - if expect: - if '(NONE)' in l: - assert False - else: - proc.stdin.close() - assert True - else: - if '(NONE)' in l: - assert True - else: - proc.stdin.close() - assert False - -def test_ticket48194_run_0(topology): - """ - Check nsSSL3Ciphers: +all - All ciphers are enabled except null. - Note: allowWeakCipher: on - """ - _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.restart(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', True) - connectWithOpenssl(topology, 'AES256-SHA256', True) - -def test_ticket48194_run_1(topology): - """ - Check nsSSL3Ciphers: +all - All ciphers are enabled except null. - Note: default allowWeakCipher (i.e., off) for +all - """ - _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')]) - # Make sure allowWeakCipher is not set. 
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_0' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', True) - -def test_ticket48194_run_2(topology): - """ - Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha - rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_1' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', False) - connectWithOpenssl(topology, 'AES128-SHA', True) - connectWithOpenssl(topology, 'AES256-SHA', True) - -def test_ticket48194_run_3(topology): - """ - Check nsSSL3Ciphers: -all - All ciphers are disabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_2' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', False) - -def test_ticket48194_run_4(topology): - """ - Check no nsSSL3Ciphers - Default ciphers are enabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_3' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', True) - -def test_ticket48194_run_5(topology): - """ - Check nsSSL3Ciphers: default - Default ciphers are enabled. 
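Editor's note: every ticket48194 case above and below repeats the same stop / move-errlog / touch / start sequence so each cipher configuration is checked against a clean error log. A hedged helper capturing that pattern (shutil replaces the shell-out; 'inst' stands for the lib389 DirSrv instance, and the suffix argument mirrors the .48194_N names used in the tests):

import shutil


def restart_with_fresh_errlog(inst, suffix):
    """Stop the instance, archive its error log under a per-case suffix,
    then start it again. Sketch only, not lib389 API."""
    inst.stop(timeout=10)
    shutil.move(inst.errlog, '%s.%s' % (inst.errlog, suffix))
    open(inst.errlog, 'w').close()          # equivalent of 'touch'
    inst.start(timeout=120)

For example: restart_with_fresh_errlog(topology.standalone, '48194_5')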
- default allowWeakCipher - """ - _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_4' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', True) - -def test_ticket48194_run_6(topology): - """ - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 - All ciphers are disabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_5' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', False) - connectWithOpenssl(topology, 'AES128-SHA', True) - -def test_ticket48194_run_7(topology): - """ - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 - All ciphers are disabled. - default allowWeakCipher - """ - _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_6' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', False) - connectWithOpenssl(topology, 'RC4-MD5', True) - -def test_ticket48194_run_8(topology): - """ - Check nsSSL3Ciphers: default + allowWeakCipher: off - Strong Default ciphers are enabled. - """ - _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'), - (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_7' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', True) - -def test_ticket48194_run_9(topology): - """ - Check no nsSSL3Ciphers - Default ciphers are enabled. 
- allowWeakCipher: on - nsslapd-errorlog-level: 0 - """ - _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), - (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')]) - topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_8' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', True) - connectWithOpenssl(topology, 'AES256-SHA256', True) - -def test_ticket48194_run_10(topology): - """ - Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5, - +TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, - +TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA, - +TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA, - +TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA, - -SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5, - -SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5, - -SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5 - allowWeakCipher: on - nsslapd-errorlog-level: 0 - """ - _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', - '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_9' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'RC4-MD5', True) - connectWithOpenssl(topology, 'AES256-SHA256', False) - -def test_ticket48194_run_11(topology): - """ - Check nsSSL3Ciphers: +fortezza - SSL_GetImplementedCiphers does not return this as a secuire cipher suite - """ - _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') - - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')]) - - log.info("\n######################### Restarting the server ######################\n") - topology.standalone.stop(timeout=10) - os.system('mv %s %s.48194_10' % (topology.standalone.errlog, topology.standalone.errlog)) - os.system('touch %s' % (topology.standalone.errlog)) - topology.standalone.start(timeout=120) - - 
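Editor's note: connectWithOpenssl above reads the openssl s_client output line by line through Popen and a readline loop; a more compact variant is sketched below in the same Python 2 style as these tests. It assumes /usr/bin/openssl and that s_client prints a 'Cipher is ...' line, with '(NONE)' meaning no cipher could be negotiated, which is exactly what the original helper keys on.

import subprocess


def handshake_succeeds(port, cipher):
    """Return True if openssl s_client negotiates the given cipher."""
    cmd = ['/usr/bin/openssl', 's_client',
           '-connect', 'localhost:%s' % port, '-cipher', cipher]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Closing stdin makes s_client exit once the handshake attempt is done.
    out, _ = proc.communicate(input='')
    for line in out.splitlines():
        if 'Cipher is' in line:
            return '(NONE)' not in line
    return False

For example: assert handshake_succeeds(LDAPSPORT, 'AES256-SHA256')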
connectWithOpenssl(topology, 'RC4-SHA', False) - connectWithOpenssl(topology, 'AES256-SHA256', False) - -def test_ticket48194_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48194_init(topo) - - test_ticket48194_run_0(topo) - test_ticket48194_run_1(topo) - test_ticket48194_run_2(topo) - test_ticket48194_run_3(topo) - test_ticket48194_run_4(topo) - test_ticket48194_run_5(topo) - test_ticket48194_run_6(topo) - test_ticket48194_run_7(topo) - test_ticket48194_run_8(topo) - test_ticket48194_run_9(topo) - test_ticket48194_run_10(topo) - test_ticket48194_run_11(topo) - - test_ticket48194_final(topo) - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket48212_test.py b/dirsrvtests/tickets/ticket48212_test.py deleted file mode 100644 index c3c8c8f..0000000 --- a/dirsrvtests/tickets/ticket48212_test.py +++ /dev/null @@ -1,210 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl - -log = logging.getLogger(__name__) - -installation_prefix = None - -MYSUFFIX = 'dc=example,dc=com' -MYSUFFIXBE = 'userRoot' -_MYLDIF = 'example1k_posix.ldif' -UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config" - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
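Editor's note: reindexUidNumber below sleeps a fixed ten seconds and then tails the error log for 'Finished indexing'; a poll-with-timeout sketch of the same check is given here (the helper name is mine, and the log message it looks for is taken from the test rather than from any documented interface):

import time


def wait_for_log_line(logfile, needle, timeout=60, interval=1):
    """Poll logfile until a line containing needle appears or timeout expires.

    Returns True on success. Re-reads the file on each pass, which is fine
    for the small error logs these tests produce.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        with open(logfile) as logf:
            if any(needle in line for line in logf):
                return True
        time.sleep(interval)
    return False

For example: assert wait_for_log_line(topology.standalone.errlog, 'Finished indexing')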
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - -def runDbVerify(topology): - topology.standalone.log.info("\n\n +++++ dbverify +++++\n") - dbverifyCMD = topology.standalone.sroot + "/slapd-" + topology.standalone.inst + "/dbverify -V" - dbverifyOUT = os.popen(dbverifyCMD, "r") - topology.standalone.log.info("Running %s" % dbverifyCMD) - running = True - error = False - while running: - l = dbverifyOUT.readline() - if l == "": - running = False - elif "libdb:" in l: - running = False - error = True - topology.standalone.log.info("%s" % l) - elif "verify failed" in l: - error = True - running = False - topology.standalone.log.info("%s" % l) - - if error: - topology.standalone.log.fatal("dbverify failed") - assert False - else: - topology.standalone.log.info("dbverify passed") - -def reindexUidNumber(topology): - topology.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n") - indexCMD = topology.standalone.sroot + "/slapd-" + topology.standalone.inst + "/db2index.pl -D \"" + DN_DM + "\" -w \"" + PASSWORD + "\" -n " + MYSUFFIXBE + " -t uidnumber" - - indexOUT = os.popen(indexCMD, "r") - topology.standalone.log.info("Running %s" % indexCMD) - - time.sleep(10) - - tailCMD = "tail -n 3 " + topology.standalone.errlog - tailOUT = os.popen(tailCMD, "r") - running = True - done = False - while running: - l = tailOUT.readline() - if l == "": - running = False - elif "Finished indexing" in l: - running = False - done = True - topology.standalone.log.info("%s" % l) - - if done: - topology.standalone.log.info("%s done" % indexCMD) - else: - topology.standalone.log.fatal("%s did not finish" % indexCMD) - assert False - -def test_ticket48212_run(topology): - """ - Import posixAccount entries. - Index uidNumber - add nsMatchingRule: integerOrderingMatch - run dbverify to see if it reports the db corruption or not - delete nsMatchingRule: integerOrderingMatch - run dbverify to see if it reports the db corruption or not - if no corruption is reported, the bug fix was verified. - """ - log.info('Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - - data_dir_path = topology.standalone.getDir(__file__, DATA_DIR) - ldif_file = data_dir_path + "ticket48212/" + _MYLDIF - topology.standalone.log.info("\n\n######################### Import Test data (%s) ######################\n" % ldif_file) - args = {TASK_WAIT: True} - importTask = Tasks(topology.standalone) - importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args) - args = {TASK_WAIT: True} - - runDbVerify(topology) - - topology.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n") - try: - topology.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(), - 'cn': 'uidnumber', - 'nsSystemIndex': 'false', - 'nsIndexType': "pres eq".split()}))) - except ValueError: - topology.standalone.log.fatal("add_s failed: %s", ValueError) - - topology.standalone.log.info("\n\n######################### reindexing... 
######################\n") - reindexUidNumber(topology) - - runDbVerify(topology) - - topology.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n") - try: - topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', 'integerOrderingMatch')]) - except ValueError: - topology.standalone.log.fatal("modify_s failed: %s", ValueError) - - topology.standalone.log.info("\n\n######################### reindexing... ######################\n") - reindexUidNumber(topology) - - runDbVerify(topology) - - topology.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n") - try: - topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', 'integerOrderingMatch')]) - except ValueError: - topology.standalone.log.fatal("modify_s failed: %s", ValueError) - - reindexUidNumber(topology) - - runDbVerify(topology) - - topology.standalone.log.info("ticket48212 was successfully verified.") - - -def test_ticket48212_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48212_run(topo) - - test_ticket48212_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48214_test.py b/dirsrvtests/tickets/ticket48214_test.py deleted file mode 100644 index 14bf392..0000000 --- a/dirsrvtests/tickets/ticket48214_test.py +++ /dev/null @@ -1,171 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from ldap.controls import SimplePagedResultsControl - -log = logging.getLogger(__name__) - -installation_prefix = None - -MYSUFFIX = 'dc=example,dc=com' -MYSUFFIXBE = 'userRoot' - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - -def getMaxBerSizeFromDseLdif(topology): - topology.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n") - dse_ldif = topology.standalone.confdir + '/dse.ldif' - grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif - topology.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD) - grepMaxBerOUT = os.popen(grepMaxBerCMD, "r") - running = True - maxbersize = -1 - while running: - l = grepMaxBerOUT.readline() - if l == "": - topology.standalone.log.info(" Empty: %s\n" % l) - running = False - elif "nsslapd-maxbersize:" in l.lower(): - running = False - fields = l.split() - if len(fields) >= 2: - maxbersize = fields[1] - topology.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1])) - else: - topology.standalone.log.info(" Wrong format - %s\n" % l) - else: - topology.standalone.log.info(" Else?: %s\n" % l) - return maxbersize - -def checkMaxBerSize(topology): - topology.standalone.log.info(" +++++ Check Max Ber Size +++++\n") - maxbersizestr = getMaxBerSizeFromDseLdif(topology) - maxbersize = int(maxbersizestr) - isdefault = True - defaultvalue = 2097152 - if maxbersize < 0: - topology.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n") - elif maxbersize == 0: - topology.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) - else: - isdefault = False - topology.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) - - try: - entry = topology.standalone.search_s('cn=config', ldap.SCOPE_BASE, - "(cn=*)", - ['nsslapd-maxbersize']) - if entry: - searchedsize = entry[0].getValue('nsslapd-maxbersize') - topology.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize) - else: - topology.standalone.log.fatal('ERROR: cn=config is not found?') - assert False - except ldap.LDAPError as e: - topology.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc']) - assert False - - if isdefault: - topology.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue)) - assert int(searchedsize) == defaultvalue - - -def test_ticket48214_run(topology): - """ - Check ldapsearch returns the correct maxbersize when it is not explicitly set. - """ - log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value') - - # bind as directory manager - topology.standalone.log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - topology.standalone.log.info("\n\n######################### Out of Box ######################\n") - checkMaxBerSize(topology) - - topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n") - topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '0')]) - checkMaxBerSize(topology) - - topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n") - topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '10000')]) - checkMaxBerSize(topology) - - topology.standalone.log.info("ticket48214 was successfully verified.") - - -def test_ticket48214_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - test_ticket48214_run(topo) - - test_ticket48214_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48226_test.py b/dirsrvtests/tickets/ticket48226_test.py deleted file mode 100644 index 6e244af..0000000 --- a/dirsrvtests/tickets/ticket48226_test.py +++ /dev/null @@ -1,249 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyReplication(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - master2.open() - self.master2 = master2 - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - os.environ['USE_VALGRIND'] = '1' - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating master 1... - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master1.allocate(args_master) - instance_master1 = master1.exists() - if instance_master1: - master1.delete() - master1.create() - master1.open() - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - - # Creating master 2... 
- master2 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master2.allocate(args_master) - instance_master2 = master2.exists() - if instance_master2: - master2.delete() - master2.create() - master2.open() - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # - # Create all the agreements - # - # Creating agreement from master 1 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m1_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m2_agmt) - - # Creating agreement from master 2 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m2_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m1_agmt) - - # Allow the replicas to get situated with the new agreements... - time.sleep(5) - - # - # Initialize all the agreements - # - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(m1_m2_agmt) - - # Check replication is working... 
- if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Clear out the tmp dir - master1.clearTmpDir(__file__) - - def fin(): - master1.delete() - master2.delete() - sbin_dir = get_sbin_dir(prefix=master2.prefix) - valgrind_disable(sbin_dir) - request.addfinalizer(fin) - - return TopologyReplication(master1, master2) - - -def test_ticket48226_set_purgedelay(topology): - args = {REPLICA_PURGE_DELAY: '5', - REPLICA_PURGE_INTERVAL: '5'} - try: - topology.master1.replica.setProperties(DEFAULT_SUFFIX, None, None, args) - except: - log.fatal('Failed to configure replica') - assert False - try: - topology.master2.replica.setProperties(DEFAULT_SUFFIX, None, None, args) - except: - log.fatal('Failed to configure replica') - assert False - topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) - topology.master2.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) - topology.master1.restart(10) - topology.master2.restart(10) - - -def test_ticket48226_1(topology): - name = 'test_entry' - dn = "cn=%s,%s" % (name, SUFFIX) - - topology.master1.add_s(Entry((dn, {'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) - - # First do an update that is replicated - mods = [(ldap.MOD_ADD, 'description', '5')] - topology.master1.modify_s(dn, mods) - - nbtry = 0 - while (nbtry <= 10): - try: - ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) - if ent.hasAttr('description') and ent.getValue('description') == '5': - break - except ldap.NO_SUCH_OBJECT: - pass - nbtry = nbtry + 1 - time.sleep(1) - assert nbtry <= 10 - - # Stop M2 so that it will not receive the next update - topology.master2.stop(10) - - # ADD a new value that is not replicated - mods = [(ldap.MOD_DELETE, 'description', '5')] - topology.master1.modify_s(dn, mods) - - # Stop M1 so that it will keep del '5' that is unknown from master2 - topology.master1.stop(10) - - # Get the sbin directory so we know where to replace 'ns-slapd' - sbin_dir = get_sbin_dir(prefix=topology.master2.prefix) - - # Enable valgrind - valgrind_enable(sbin_dir) - - # start M2 to do the next updates - topology.master2.start(60) - - # ADD 'description' by '5' - mods = [(ldap.MOD_DELETE, 'description', '5')] - topology.master2.modify_s(dn, mods) - - # DEL 'description' by '5' - mods = [(ldap.MOD_ADD, 'description', '5')] - topology.master2.modify_s(dn, mods) - - # sleep of purgedelay so that the next update will purge the CSN_7 - time.sleep(6) - - # ADD 'description' by '6' that purge the state info - mods = [(ldap.MOD_ADD, 'description', '6')] - topology.master2.modify_s(dn, mods) - - # Restart master1 - topology.master1.start(10) - - # Get the results file - results_file = valgrind_get_results_file(topology.master2) - - # Stop master2 - topology.master2.stop(10) - - # Check for leak - if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'csnset_dup'): - log.info('Valgrind reported leak in csnset_dup!') - assert False - else: - log.info('Valgrind is happy!') - - # Check for invalid read/write - if valgrind_check_file(results_file, VALGRIND_INVALID_STR, 'csnset_dup'): - log.info('Valgrind reported invalid!') - assert False - else: - log.info('Valgrind is happy!') - - # Check for invalid read/write - if valgrind_check_file(results_file, VALGRIND_INVALID_STR, 'csnset_free'): - log.info('Valgrind reported invalid!') - assert False - else: - 
log.info('Valgrind is happy!') - - topology.master1.start(10) - log.info('Testcase PASSED') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) - diff --git a/dirsrvtests/tickets/ticket48228_test.py b/dirsrvtests/tickets/ticket48228_test.py deleted file mode 100644 index bb20620..0000000 --- a/dirsrvtests/tickets/ticket48228_test.py +++ /dev/null @@ -1,336 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( -SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX -SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX -SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER -SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX -SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER -SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX - -USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX -USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - - -def set_global_pwpolicy(topology, inhistory): - log.info(" +++++ Enable global password policy +++++\n") - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - # Enable password policy - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) - except ldap.LDAPError as e: - log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) - assert False - - log.info(" Set global password history on\n") - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', 'on')]) - except ldap.LDAPError as e: - log.error('Failed to set passwordHistory: error ' + e.message['desc']) - assert False - - log.info(" Set global passwords in history\n") - try: - count = "%d" % inhistory - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count)]) - except ldap.LDAPError as e: - log.error('Failed to set passwordInHistory: error ' + e.message['desc']) - assert False - - -def set_subtree_pwpolicy(topology): - log.info(" +++++ Enable subtree level password policy +++++\n") - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - log.info(" Add the container") - try: - topology.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), - 'cn': 'nsPwPolicyContainer'}))) - except ldap.LDAPError as e: - log.error('Failed to add subtree container: error ' + e.message['desc']) - assert False - - log.info(" Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}") - try: - topology.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), - 'cn': SUBTREE_PWPDN, - 'passwordMustChange': 'off', - 'passwordExp': 'off', - 'passwordHistory': 'on', - 'passwordInHistory': '6', - 'passwordMinAge': '0', - 'passwordChange': 'on', - 'passwordStorageScheme': 'clear'}))) - except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy: error ' + e.message['desc']) - assert False - - log.info(" Add the COS template") - try: - topology.standalone.add_s(Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), - 'cn': SUBTREE_PWPDN, - 'cosPriority': '1', - 'cn': SUBTREE_COS_TMPLDN, - 'pwdpolicysubentry': SUBTREE_PWP}))) - except ldap.LDAPError as e: - log.error('Failed to add COS template: error ' + e.message['desc']) - assert False - - log.info(" Add the COS definition") - try: - topology.standalone.add_s(Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), - 'cn': SUBTREE_PWPDN, - 'costemplatedn': SUBTREE_COS_TMPL, - 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) - except ldap.LDAPError as e: - log.error('Failed to add COS def: error ' + e.message['desc']) - assert False - - -def check_passwd_inhistory(topology, user, cpw, passwd): - inhistory = 0 - log.info(" Bind as {%s,%s}" % (user, cpw)) - topology.standalone.simple_bind_s(user, cpw) - try: - topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd)]) - except ldap.LDAPError as e: - log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error ' + e.message['desc']) - inhistory = 1 - return inhistory - - -def update_passwd(topology, user, passwd, times): - cpw = passwd - loop = 0 - while loop < times: - log.info(" Bind as {%s,%s}" % (user, cpw)) - 
topology.standalone.simple_bind_s(user, cpw) - cpw = 'password%d' % loop - try: - topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)]) - except ldap.LDAPError as e: - log.fatal('test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message['desc']) - assert False - loop += 1 - - # checking the first password, which is supposed to be in history - inhistory = check_passwd_inhistory(topology, user, cpw, passwd) - assert inhistory == 1 - - -def test_ticket48228_test_global_policy(topology): - """ - Check global password policy - """ - - log.info(' Set inhistory = 6') - set_global_pwpolicy(topology, 6) - - log.info(' Bind as directory manager') - log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - log.info(' Add an entry' + USER1_DN) - try: - topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'sn': '1', - 'cn': 'user 1', - 'uid': 'user1', - 'givenname': 'user', - 'mail': 'user1@example.com', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) - assert False - - log.info(' Update the password of ' + USER1_DN + ' 6 times') - update_passwd(topology, USER1_DN, 'password', 6) - - log.info(' Set inhistory = 4') - set_global_pwpolicy(topology, 4) - - log.info(' checking the first password, which is supposed NOT to be in history any more') - cpw = 'password%d' % 5 - tpw = 'password' - inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) - assert inhistory == 0 - - log.info(' checking the second password, which is supposed NOT to be in history any more') - cpw = tpw - tpw = 'password%d' % 0 - inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) - assert inhistory == 0 - - log.info(' checking the second password, which is supposed NOT to be in history any more') - cpw = tpw - tpw = 'password%d' % 1 - inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) - assert inhistory == 0 - - log.info(' checking the third password, which is supposed to be in history') - cpw = tpw - tpw = 'password%d' % 2 - inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw) - assert inhistory == 1 - - log.info("Global policy was successfully verified.") - - -def test_ticket48228_test_subtree_policy(topology): - """ - Check subtree level password policy - """ - - log.info(' Set inhistory = 6') - set_subtree_pwpolicy(topology) - - log.info(' Bind as directory manager') - log.info("Bind as %s" % DN_DM) - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - - log.info(' Add an entry' + USER2_DN) - try: - topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'sn': '2', - 'cn': 'user 2', - 'uid': 'user2', - 'givenname': 'user', - 'mail': 'user2@example.com', - 'userpassword': 'password'}))) - except ldap.LDAPError as e: - log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) - assert False - - log.info(' Update the password of ' + USER2_DN + ' 6 times') - update_passwd(topology, USER2_DN, 'password', 6) - - log.info(' Set inhistory = 4') - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - try: - topology.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', '4')]) - except ldap.LDAPError as e: - log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) - assert False - - log.info(' 
checking the first password, which is supposed NOT to be in history any more') - cpw = 'password%d' % 5 - tpw = 'password' - inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) - assert inhistory == 0 - - log.info(' checking the second password, which is supposed NOT to be in history any more') - cpw = tpw - tpw = 'password%d' % 0 - inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) - assert inhistory == 0 - - log.info(' checking the second password, which is supposed NOT to be in history any more') - cpw = tpw - tpw = 'password%d' % 1 - inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) - assert inhistory == 0 - - log.info(' checking the third password, which is supposed to be in history') - cpw = tpw - tpw = 'password%d' % 2 - inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw) - assert inhistory == 1 - - log.info("Subtree level policy was successfully verified.") - - -def test_ticket48228_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - log.info('Testing Ticket 48228 - wrong password check if passwordInHistory is decreased') - - test_ticket48228_test_global_policy(topo) - - test_ticket48228_test_subtree_policy(topo) - - test_ticket48228_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48233_test.py b/dirsrvtests/tickets/ticket48233_test.py deleted file mode 100644 index d9b0aae..0000000 --- a/dirsrvtests/tickets/ticket48233_test.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Delete each instance in the end - def fin(): - standalone.delete() - request.addfinalizer(fin) - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48233(topology): - """Test that ACI's that use IP restrictions do not crash the server at - shutdown - """ - - # Add aci to restrict access my ip - aci_text = ('(targetattr != "userPassword")(version 3.0;acl ' + - '"Enable anonymous access - IP"; allow (read,compare,search)' + - '(userdn = "ldap:///anyone") and (ip="127.0.0.1");)') - - try: - topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)]) - except ldap.LDAPError as e: - log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc'])) - assert False - time.sleep(1) - - # Anonymous search to engage the aci - try: - topology.standalone.simple_bind_s("", "") - except ldap.LDAPError as e: - log.error('Failed to anonymously bind -error %s' % (e.message['desc'])) - assert False - - try: - entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*') - if not entries: - log.fatal('Failed return an entries from search') - assert False - except ldap.LDAPError as e: - log.fatal('Search failed: ' + e.message['desc']) - assert False - - # Restart the server - topology.standalone.restart(timeout=10) - - # Check for crash - if topology.standalone.detectDisorderlyShutdown(): - log.fatal('Server crashed!') - assert False - - log.info('Test complete') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tickets/ticket48252_test.py b/dirsrvtests/tickets/ticket48252_test.py deleted file mode 100644 index 5970d70..0000000 --- a/dirsrvtests/tickets/ticket48252_test.py +++ /dev/null @@ -1,178 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * - -log = logging.getLogger(__name__) - -installation_prefix = None - -# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( -USER_NUM = 10 -TEST_USER = "test_user" - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. 
- ''' - global installation_prefix - - if installation_prefix: - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - standalone = DirSrv(verbose=False) - - # Args for the standalone instance - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - - # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # clear the tmp directory - standalone.clearTmpDir(__file__) - - # Here we have standalone instance up and running - return TopologyStandalone(standalone) - -def test_ticket48252_setup(topology): - """ - Enable USN plug-in for enabling tombstones - Add test entries - """ - - log.info("Enable the USN plugin...") - try: - topology.standalone.plugins.enable(name=PLUGIN_USN) - except e: - log.error("Failed to enable USN Plugin: error " + e.message['desc']) - assert False - - log.info("Adding test entries...") - for id in range(USER_NUM): - name = "%s%d" % (TEST_USER, id) - topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person".split(), - 'sn': name, - 'cn': name}))) -def in_index_file(topology, id, index): - key = "%s%s" % (TEST_USER, id) - log.info(" dbscan - checking %s is in index file %s..." % (key, index)) - dbscanOut = topology.standalone.dbscan(DEFAULT_BENAME, index) - - if key in dbscanOut: - found = True - topology.standalone.log.info("Found key %s in dbscan output" % key) - else: - found = False - topology.standalone.log.info("Did not found key %s in dbscan output" % key) - - return found - -def test_ticket48252_run_0(topology): - """ - Delete an entry cn=test_entry0 - Check it is not in the 'cn' index file - """ - log.info("Case 1 - Check deleted entry is not in the 'cn' index file") - del_rdn = "cn=%s0" % TEST_USER - del_entry = "%s,%s" % (del_rdn, SUFFIX) - log.info(" Deleting a test entry %s..." % del_entry) - topology.standalone.delete_s(del_entry) - - assert in_index_file(topology, 0, 'cn') == False - - log.info(" db2index - reindexing %s ..." % 'cn') - assert topology.standalone.db2index(DEFAULT_BENAME, 'cn') - - assert in_index_file(topology, 0, 'cn') == False - log.info(" entry %s is not in the cn index file after reindexed." % del_entry) - log.info('Case 1 - PASSED') - -def test_ticket48252_run_1(topology): - """ - Delete an entry cn=test_entry1 - Check it is in the 'objectclass' index file as a tombstone entry - """ - log.info("Case 2 - Check deleted entry is in the 'objectclass' index file as a tombstone entry") - del_rdn = "cn=%s1" % TEST_USER - del_entry = "%s,%s" % (del_rdn, SUFFIX) - log.info(" Deleting a test entry %s..." % del_entry) - topology.standalone.delete_s(del_entry) - - entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) - assert len(entry) == 1 - log.info(" entry %s is in the objectclass index file." % del_entry) - - log.info(" db2index - reindexing %s ..." 
% 'objectclass') - assert topology.standalone.db2index(DEFAULT_BENAME, 'objectclass') - - entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) - assert len(entry) == 1 - log.info(" entry %s is in the objectclass index file after reindexed." % del_entry) - log.info('Case 2 - PASSED') - -def test_ticket48252_final(topology): - topology.standalone.delete() - log.info('Testing Ticket 48252 - PASSED.') - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - topo = topology(True) - log.info('Testing Ticket 48252 - db2index creates index entry from deleted records') - - test_ticket48252_setup(topo) - - test_ticket48252_run_0(topo) - test_ticket48252_run_1(topo) - - test_ticket48252_final(topo) - - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/tickets/ticket48265_test.py b/dirsrvtests/tickets/ticket48265_test.py deleted file mode 100644 index fb695c5..0000000 --- a/dirsrvtests/tickets/ticket48265_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -import threading -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - -USER_NUM = 20 -TEST_USER = 'test_user' - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - return TopologyStandalone(standalone) - - -def test_ticket48265_test(topology): - """ - Complex filter issues - Ticket 47521 type complex filter: - (&(|(uid=tuser*)(cn=Test user*))(&(givenname=test*3))(mail=tuser@example.com)(&(description=*))) - Ticket 48264 type complex filter: - (&(&(|(l=EU)(l=AP)(l=NA))(|(c=SE)(c=DE)))(|(uid=*test*)(cn=*test*))(l=eu)) - """ - - log.info("Adding %d test entries..." 
% USER_NUM) - for id in range(USER_NUM): - name = "%s%d" % (TEST_USER, id) - mail = "%s@example.com" % name - secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX) - topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { - 'objectclass': "top person organizationalPerson inetOrgPerson".split(), - 'sn': name, - 'cn': name, - 'uid': name, - 'givenname': 'test', - 'mail': mail, - 'description': 'description', - 'secretary': secretary, - 'l': 'MV', - 'title': 'Engineer'}))) - - log.info("Search with Ticket 47521 type complex filter") - for id in range(USER_NUM): - name = "%s%d" % (TEST_USER, id) - mail = "%s@example.com" % name - filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % (TEST_USER, TEST_USER, mail) - entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521) - assert len(entry) == 1 - - log.info("Search with Ticket 48265 type complex filter") - for id in range(USER_NUM): - name = "%s%d" % (TEST_USER, id) - mail = "%s@example.com" % name - filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % (name, mail) - entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265) - assert len(entry) == 1 - - log.info('Test 48265 complete\n') - - -def test_ticket48265_final(topology): - topology.standalone.delete() - log.info('Testcase PASSED') - - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to - - edit this file and comment '@pytest.fixture' line before 'topology' function. - - set the installation prefix - - run this program - ''' - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - log.info('Testing Ticket 48265 - Complex filter in a search request does not work as expected') - - test_ticket48265_test(topo) - - test_ticket48265_final(topo) - - -if __name__ == '__main__': - run_isolated() diff --git a/dirsrvtests/tickets/ticket48312_test.py b/dirsrvtests/tickets/ticket48312_test.py deleted file mode 100644 index 0989279..0000000 --- a/dirsrvtests/tickets/ticket48312_test.py +++ /dev/null @@ -1,168 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... 
- standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Delete each instance in the end - def fin(): - standalone.delete() - - request.addfinalizer(fin) - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_ticket48312(topology): - """ - Configure managed entries plugins(tempalte/definition), then perform a - modrdn(deleteoldrdn 1), and make sure the server does not crash. - """ - - GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX - PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX - USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX - CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' - TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX - USER_NEWRDN = 'uid=\+user1' - - # - # First enable dynamic plugins - # - try: - topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) - except ldap.LDAPError as e: - ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc']) - assert False - topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) - - # - # Add our org units (they should already exist, but do it just in case) - # - try: - topology.standalone.add_s(Entry((PEOPLE_OU, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'people'}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) - assert False - - try: - topology.standalone.add_s(Entry((GROUP_OU, { - 'objectclass': 'top extensibleObject'.split(), - 'ou': 'people'}))) - except ldap.ALREADY_EXISTS: - pass - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) - assert False - - # - # Add the template entry - # - try: - topology.standalone.add_s(Entry((TEMPLATE_DN, { - 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), - 'cn': 'MEP Template', - 'mepRDNAttr': 'cn', - 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], - 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc']) - assert False - - # - # Add the definition entry - # - try: - topology.standalone.add_s(Entry((CONFIG_DN, { - 'objectclass': 'top extensibleObject'.split(), - 'cn': 'config', - 'originScope': PEOPLE_OU, - 'originFilter': 'objectclass=posixAccount', - 'managedBase': GROUP_OU, - 'managedTemplate': TEMPLATE_DN - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc']) - assert False - - # - # Add an entry that meets the MEP scope - # - try: - topology.standalone.add_s(Entry((USER_DN, { - 'objectclass': 'top posixAccount extensibleObject'.split(), - 'uid': 'user1', - 'cn': 'user1', - 'uidNumber': '1', - 'gidNumber': '1', - 'homeDirectory': '/home/user1', - 'description': 'uiser description' - }))) - except ldap.LDAPError as e: - log.fatal('test_mep: Failed to user1: error ' + e.message['desc']) - assert False - - # - # Perform a modrdn on USER_DN - # - try: - 
topology.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) - except ldap.LDAPError as e: - log.error('Failed to modrdn: error ' + e.message['desc']) - assert False - - log.info('Test complete') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tickets/ticket48325_test.py b/dirsrvtests/tickets/ticket48325_test.py deleted file mode 100644 index 3505d1a..0000000 --- a/dirsrvtests/tickets/ticket48325_test.py +++ /dev/null @@ -1,270 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyReplication(object): - def __init__(self, master1, hub1, consumer1): - master1.open() - self.master1 = master1 - hub1.open() - self.hub1 = hub1 - consumer1.open() - self.consumer1 = consumer1 - - -@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating master 1... - master1 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master1.allocate(args_master) - instance_master1 = master1.exists() - if instance_master1: - master1.delete() - master1.create() - master1.open() - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER_1) - - # Creating hub 1... - hub1 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_HUB_1 - args_instance[SER_PORT] = PORT_HUB_1 - args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_hub = args_instance.copy() - hub1.allocate(args_hub) - instance_hub1 = hub1.exists() - if instance_hub1: - hub1.delete() - hub1.create() - hub1.open() - hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB, - replicaId=REPLICAID_HUB_1) - - # Creating consumer 1... 
- consumer1 = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_CONSUMER_1 - args_instance[SER_PORT] = PORT_CONSUMER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_consumer = args_instance.copy() - consumer1.allocate(args_consumer) - instance_consumer1 = consumer1.exists() - if instance_consumer1: - consumer1.delete() - consumer1.create() - consumer1.open() - consumer1.changelog.create() - consumer1.replica.enableReplication(suffix=SUFFIX, - role=REPLICAROLE_CONSUMER, - replicaId=CONSUMER_REPLICAID) - - # - # Create all the agreements - # - # Creating agreement from master 1 to hub 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host, - port=hub1.port, - properties=properties) - if not m1_h1_agmt: - log.fatal("Fail to create a master -> hub replica agreement") - sys.exit(1) - log.debug("%s created" % m1_h1_agmt) - - # Creating agreement from hub 1 to consumer 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host, - port=consumer1.port, - properties=properties) - if not h1_c1_agmt: - log.fatal("Fail to create a hub -> consumer replica agreement") - sys.exit(1) - log.debug("%s created" % h1_c1_agmt) - - # Allow the replicas to get situated with the new agreements... - time.sleep(5) - - # - # Initialize all the agreements - # - master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1) - master1.waitForReplInit(m1_h1_agmt) - hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) - hub1.waitForReplInit(h1_c1_agmt) - - # Check replication is working... - if master1.testReplication(DEFAULT_SUFFIX, consumer1): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Delete each instance in the end - def fin(): - master1.delete() - hub1.delete() - consumer1.delete() - pass - - request.addfinalizer(fin) - - # Clear out the tmp dir - master1.clearTmpDir(__file__) - - return TopologyReplication(master1, hub1, consumer1) - - -def checkFirstElement(ds, rid): - """ - Return True if the first RUV element is for the specified rid - """ - try: - entry = ds.search_s(DEFAULT_SUFFIX, - ldap.SCOPE_SUBTREE, - REPLICA_RUV_FILTER, - ['nsds50ruv']) - assert entry - entry = entry[0] - except ldap.LDAPError as e: - log.fatal('Failed to retrieve RUV entry: %s' % str(e)) - assert False - - ruv_elements = entry.getValues('nsds50ruv') - if ('replica %s ' % rid) in ruv_elements[1]: - return True - else: - return False - - -def test_ticket48325(topology): - """ - Test that the RUV element order is correctly maintained when promoting - a hub or consumer. 
- """ - - # - # Promote consumer to master - # - try: - DN = topology.consumer1.replica._get_mt_entry(DEFAULT_SUFFIX) - topology.consumer1.modify_s(DN, [(ldap.MOD_REPLACE, - 'nsDS5ReplicaType', - '3'), - (ldap.MOD_REPLACE, - 'nsDS5ReplicaID', - '1234'), - (ldap.MOD_REPLACE, - 'nsDS5Flags', - '1')]) - except ldap.LDAPError as e: - log.fatal('Failed to promote consuemr to master: error %s' % str(e)) - assert False - time.sleep(1) - - # - # Check ruv has been reordered - # - if not checkFirstElement(topology.consumer1, '1234'): - log.fatal('RUV was not reordered') - assert False - - # - # Create repl agreement from the newly promoted master to master1 - # - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - new_agmt = topology.consumer1.agreement.create(suffix=SUFFIX, - host=topology.master1.host, - port=topology.master1.port, - properties=properties) - - if not new_agmt: - log.fatal("Fail to create new agmt from old consumer to the master") - assert False - - # - # Test replication is working - # - if topology.consumer1.testReplication(DEFAULT_SUFFIX, topology.master1): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # - # Promote hub to master - # - try: - DN = topology.hub1.replica._get_mt_entry(DEFAULT_SUFFIX) - topology.hub1.modify_s(DN, [(ldap.MOD_REPLACE, - 'nsDS5ReplicaType', - '3'), - (ldap.MOD_REPLACE, - 'nsDS5ReplicaID', - '5678')]) - except ldap.LDAPError as e: - log.fatal('Failed to promote consuemr to master: error %s' % str(e)) - assert False - time.sleep(1) - - # - # Check ruv has been reordered - # - if not checkFirstElement(topology.hub1, '5678'): - log.fatal('RUV was not reordered') - assert False - - # - # Test replication is working - # - if topology.hub1.testReplication(DEFAULT_SUFFIX, topology.master1): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Done - log.info('Test complete') - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tickets/ticket48362_test.py b/dirsrvtests/tickets/ticket48362_test.py deleted file mode 100644 index 1b5651f..0000000 --- a/dirsrvtests/tickets/ticket48362_test.py +++ /dev/null @@ -1,278 +0,0 @@ -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -PEOPLE_OU='people' -PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) -MAX_ACCOUNTS=5 - -BINDMETHOD_ATTR = 'dnaRemoteBindMethod' -BINDMETHOD_VALUE = "SASL/GSSAPI" -PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' -PROTOCOLE_VALUE = 'LDAP' - -class TopologyReplication(object): - def __init__(self, master1, master2): - master1.open() - self.master1 = master1 - master2.open() - self.master2 = master2 - - -#@pytest.fixture(scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = 
installation1_prefix - - # Creating master 1... - master1 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - args_instance[SER_HOST] = HOST_MASTER_1 - args_instance[SER_PORT] = PORT_MASTER_1 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master1.allocate(args_master) - instance_master1 = master1.exists() - if instance_master1: - master1.delete() - master1.create() - master1.open() - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - - # Creating master 2... - master2 = DirSrv(verbose=False) - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - args_instance[SER_HOST] = HOST_MASTER_2 - args_instance[SER_PORT] = PORT_MASTER_2 - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_master = args_instance.copy() - master2.allocate(args_master) - instance_master2 = master2.exists() - if instance_master2: - master2.delete() - master2.create() - master2.open() - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # - # Create all the agreements - # - # Creating agreement from master 1 to master 2 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - if not m1_m2_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m1_m2_agmt) - - # Creating agreement from master 2 to master 1 - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - if not m2_m1_agmt: - log.fatal("Fail to create a master -> master replica agreement") - sys.exit(1) - log.debug("%s created" % m2_m1_agmt) - - # Allow the replicas to get situated with the new agreements... - time.sleep(5) - - # - # Initialize all the agreements - # - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(m1_m2_agmt) - - # Check replication is working... 
- if master1.testReplication(DEFAULT_SUFFIX, master2): - log.info('Replication is working.') - else: - log.fatal('Replication is not working.') - assert False - - # Delete each instance in the end - def fin(): - master1.delete() - master2.delete() - #request.addfinalizer(fin) - - # Clear out the tmp dir - master1.clearTmpDir(__file__) - - return TopologyReplication(master1, master2) - - -def _dna_config(server, nextValue=500, maxValue=510): - log.info("Add dna plugin config entry...%s" % server) - - cfg_base_dn = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config' - - try: - server.add_s(Entry((cfg_base_dn, { - 'objectclass': 'top dnaPluginConfig'.split(), - 'dnaType': 'description', - 'dnaMagicRegen': '-1', - 'dnaFilter': '(objectclass=posixAccount)', - 'dnaScope': 'ou=people,%s' % SUFFIX, - 'dnaNextValue': str(nextValue), - 'dnaMaxValue' : str(nextValue+maxValue), - 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX - }))) - - except ldap.LDAPError as e: - log.error('Failed to add DNA config entry: error ' + e.message['desc']) - assert False - - log.info("Enable the DNA plugin...") - try: - server.plugins.enable(name=PLUGIN_DNA) - except e: - log.error("Failed to enable DNA Plugin: error " + e.message['desc']) - assert False - - log.info("Restarting the server...") - server.stop(timeout=120) - time.sleep(1) - server.start(timeout=120) - time.sleep(3) - - -SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX - -def _wait_shared_cfg_servers(server, expected): - attempts = 0 - ents = [] - try: - ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") - except ldap.NO_SUCH_OBJECT: - pass - except lib389.NoSuchEntryError: - pass - while (len(ents) != expected): - assert attempts < 10 - time.sleep(5) - try: - ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") - except ldap.NO_SUCH_OBJECT: - pass - except lib389.NoSuchEntryError: - pass - -def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCOLE_VALUE): - log.info('\n======================== Update dnaPortNum=%d ============================\n'% server.port) - try: - ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) - mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, method), - (ldap.MOD_REPLACE, PROTOCOLE_ATTR, transport)] - server.modify_s(ent.dn, mod) - - log.info('\n======================== Update done\n') - ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) - except ldap.NO_SUCH_OBJECT: - log.fatal("Unknown host") - assert False - - -def test_ticket48362(topology): - """Write your replication testcase here. - - To access each DirSrv instance use: topology.master1, topology.master2, - ..., topology.hub1, ..., topology.consumer1, ... - - Also, if you need any testcase initialization, - please, write additional fixture for that(include finalizer). - """ - - try: - topology.master1.add_s(Entry((PEOPLE_DN, { - 'objectclass': "top extensibleObject".split(), - 'ou': 'people'}))) - except ldap.ALREADY_EXISTS: - pass - - topology.master1.add_s(Entry((SHARE_CFG_BASE, { - 'objectclass': 'top organizationalunit'.split(), - 'ou': 'ranges' - }))) - # master 1 will have a valid remaining range (i.e. 101) - # master 2 will not have a valid remaining range (i.e. 0) so dna servers list on master2 - # will not contain master 2. 
-
-
-def test_ticket48362(topology):
-    """Check that DNA shared config entries keep their bind method and
-    connection protocol attributes across a server restart, even when the
-    local server no longer has a valid remaining range.
-
-    To access each DirSrv instance use: topology.master1, topology.master2.
-    """
-
-    try:
-        topology.master1.add_s(Entry((PEOPLE_DN, {
-                                      'objectclass': "top extensibleObject".split(),
-                                      'ou': 'people'})))
-    except ldap.ALREADY_EXISTS:
-        pass
-
-    topology.master1.add_s(Entry((SHARE_CFG_BASE, {
-                                  'objectclass': 'top organizationalunit'.split(),
-                                  'ou': 'ranges'
-                                  })))
-
-    # master 1 will have a valid remaining range (i.e. 101)
-    # master 2 will not have a valid remaining range (i.e. 0), so the DNA
-    # servers list on master 2 will not contain master 2. At restart, master 2
-    # is then recreated without the method/protocol attributes.
-    _dna_config(topology.master1, nextValue=1000, maxValue=100)
-    _dna_config(topology.master2, nextValue=2000, maxValue=-1)
-
-    # check we have all the servers available
-    _wait_shared_cfg_servers(topology.master1, 2)
-    _wait_shared_cfg_servers(topology.master2, 2)
-
-    # now force the method/transport on the servers entry
-    _shared_cfg_server_update(topology.master1)
-    _shared_cfg_server_update(topology.master2)
-
-    log.info('\n======================== BEFORE RESTART ============================\n')
-    ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master1.port)
-    assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
-    assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
-
-    log.info('\n======================== BEFORE RESTART ============================\n')
-    ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master2.port)
-    assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
-    assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
-
-    topology.master1.restart(10)
-    topology.master2.restart(10)
-
-    # to allow DNA plugin to recreate the local host entry
-    time.sleep(40)
-
-    log.info('\n=================== AFTER RESTART =================================\n')
-    ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master1.port)
-    assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
-    assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
-
-    log.info('\n=================== AFTER RESTART =================================\n')
-    ent = topology.master1.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % topology.master2.port)
-    assert(ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE)
-    assert(ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE)
-
-    log.info('Test complete')
-
-
-if __name__ == '__main__':
-    # Run isolated
-    # -s for DEBUG mode
-    global installation1_prefix
-    installation1_prefix = '/home/tbordaz/install_1.3.4'
-    topo = topology(True)
-    test_ticket48362(topo)
-#    CURRENT_FILE = os.path.realpath(__file__)
-#    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
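The heart of the test above is re-reading a shared config entry by its dnaPortNum and verifying that the bind-method and connection-protocol attributes survive the restart. A compact sketch of that check, reusing the constants defined in the test module (the helper name itself is illustrative):

    import ldap

    def check_method_protocol_kept(server, port):
        """Fetch the shared config entry for `port` and verify both attributes."""
        ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL,
                              "(dnaPortNum=%d)" % port)
        assert ent.hasAttr(BINDMETHOD_ATTR), 'bind method lost after restart'
        assert ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE
        assert ent.hasAttr(PROTOCOLE_ATTR), 'connection protocol lost after restart'
        assert ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE

    # e.g. check_method_protocol_kept(topology.master1, topology.master2.port)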
diff --git a/dirsrvtests/tickets/ticket48369_test.py b/dirsrvtests/tickets/ticket48369_test.py
deleted file mode 100644
index 0b65fa2..0000000
--- a/dirsrvtests/tickets/ticket48369_test.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-import time
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-from ldap.controls.ppolicy import PasswordPolicyControl
-
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48369(topology):
-    """
-    Test RFE 48369 - return password policy controls by default without
-    needing to be requested.
-    """
-
-    DN = 'uid=test,' + DEFAULT_SUFFIX
-
-    #
-    # Setup password policy
-    #
-    try:
-        topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE,
-                                                    'passwordExp',
-                                                    'on'),
-                                                   (ldap.MOD_REPLACE,
-                                                    'passwordMaxAge',
-                                                    '864000'),
-                                                   (ldap.MOD_REPLACE,
-                                                    'passwordSendExpiringTime',
-                                                    'on')])
-    except ldap.LDAPError as e:
-        log.fatal('Failed to set config: %s' % str(e))
-        assert False
-
-    #
-    # Add entry
-    #
-    try:
-        topology.standalone.add_s(Entry((DN,
-                                         {'objectclass': 'top extensibleObject'.split(),
-                                          'uid': 'test',
-                                          'userpassword': 'password'})))
-    except ldap.LDAPError as e:
-        log.fatal('Failed to add user entry: %s' % str(e))
-        assert False
-    time.sleep(1)
-
-    #
-    # Bind as the new user, and request the control
-    #
-    try:
-        msgid = topology.standalone.simple_bind(DN, "password",
-                                                serverctrls=[PasswordPolicyControl()])
-        res_type, res_data, res_msgid, res_ctrls = \
-            topology.standalone.result3(msgid)
-    except ldap.LDAPError as e:
-        log.fatal('Failed to bind: %s' % str(e))
-        assert False
-
-    if res_ctrls[0].controlType == PasswordPolicyControl.controlType:
-        ppolicy_ctrl = res_ctrls[0]
-    else:
-        log.fatal('Control not found')
-        assert False
-
-    log.info('Time until expiration (%s)' %
-             repr(ppolicy_ctrl.timeBeforeExpiration))
-
-    log.info('Test complete')
-
-
-if __name__ == '__main__':
-    # Run isolated
-    # -s for DEBUG mode
-    CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file
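Outside of lib389, the same password-policy check can be sketched against plain python-ldap; the connection parameters below are placeholders, while the control class and result handling mirror what the deleted test does:

    import ldap
    from ldap.controls.ppolicy import PasswordPolicyControl

    def bind_and_get_ppolicy(uri, dn, password):
        """Bind and return the password policy response control, if any."""
        conn = ldap.initialize(uri)
        msgid = conn.simple_bind(dn, password,
                                 serverctrls=[PasswordPolicyControl()])
        res_type, res_data, res_msgid, res_ctrls = conn.result3(msgid)
        for ctrl in res_ctrls:
            if ctrl.controlType == PasswordPolicyControl.controlType:
                return ctrl
        return None

    # ctrl = bind_and_get_ppolicy('ldap://localhost:389',
    #                             'uid=test,dc=example,dc=com', 'password')
    # if ctrl:
    #     print('Seconds until expiration: %r' % ctrl.timeBeforeExpiration)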
diff --git a/dirsrvtests/tickets/ticket48370_test.py b/dirsrvtests/tickets/ticket48370_test.py
deleted file mode 100644
index f5b1f47..0000000
--- a/dirsrvtests/tickets/ticket48370_test.py
+++ /dev/null
@@ -1,236 +0,0 @@
-import os
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
-@pytest.fixture(scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48370(topology):
-    """
-    Deleting attribute values and re-adding a value does not properly update
-    the presence index. The values are not actually deleted from the index.
-    """
-
-    DN = 'uid=user0099,' + DEFAULT_SUFFIX
-
-    #
-    # Add an entry
-    #
-    topology.standalone.add_s(Entry((DN, {
-                              'objectclass': ['top', 'person',
-                                              'organizationalPerson',
-                                              'inetorgperson',
-                                              'posixAccount'],
-                              'givenname': 'test',
-                              'sn': 'user',
-                              'loginshell': '/bin/bash',
-                              'uidNumber': '10099',
-                              'gidNumber': '10099',
-                              'gecos': 'Test User',
-                              'mail': ['user0099@dev.null',
-                                       'alias@dev.null',
-                                       'user0099@redhat.com'],
-                              'cn': 'Test User',
-                              'homeDirectory': '/home/user0099',
-                              'uid': 'admin2',
-                              'userpassword': 'password'})))
-
-    #
-    # Perform modify (delete & add mail attributes)
-    #
-    try:
-        topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
-                                           'mail',
-                                           'user0099@dev.null'),
-                                          (ldap.MOD_DELETE,
-                                           'mail',
-                                           'alias@dev.null'),
-                                          (ldap.MOD_ADD,
-                                           'mail', 'user0099@dev.null')])
-    except ldap.LDAPError as e:
-        log.fatal('Failed to modify user: ' + str(e))
-        assert False
-
-    #
-    # Search using deleted attribute value - no entries should be returned
-    #
-    try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                             ldap.SCOPE_SUBTREE,
-                                             'mail=alias@dev.null')
-        if entry:
-            log.fatal('Entry incorrectly returned')
-            assert False
-    except ldap.LDAPError as e:
-        log.fatal('Failed to search for user: ' + str(e))
-        assert False
-
-    #
-    # Search using existing attribute value - the entry should be returned
-    #
-    try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                             ldap.SCOPE_SUBTREE,
-                                             'mail=user0099@dev.null')
-        if entry is None:
-            log.fatal('Entry not found, but it should have been')
-            assert False
-    except ldap.LDAPError as e:
-        log.fatal('Failed to search for user: ' + str(e))
-        assert False
-
-    #
-    # Delete the last values
-    #
-    try:
-        topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
-                                           'mail',
-                                           'user0099@dev.null'),
-                                          (ldap.MOD_DELETE,
-                                           'mail',
-                                           'user0099@redhat.com')
-                                          ])
-    except ldap.LDAPError as e:
-        log.fatal('Failed to modify user: ' + str(e))
-        assert False
-
-    #
-    # Search using deleted attribute value - no entries should be returned
-    #
-    try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                             ldap.SCOPE_SUBTREE,
-                                             'mail=user0099@redhat.com')
-        if entry:
-            log.fatal('Entry incorrectly returned')
-            assert False
-    except ldap.LDAPError as e:
-        log.fatal('Failed to search for user: ' + str(e))
-        assert False
-
-    #
-    # Make sure presence index is correctly updated - no entries should be
-    # returned
-    #
-    try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                             ldap.SCOPE_SUBTREE,
-                                             'mail=*')
-        if entry:
-            log.fatal('Entry incorrectly returned')
-            assert False
-    except ldap.LDAPError as e:
-        log.fatal('Failed to search for user: ' + str(e))
-        assert False
-
-    #
-    # Now add the attributes back, and let's run a different set of tests
-    # with a different number of attributes
-    #
-    try:
-        topology.standalone.modify_s(DN, [(ldap.MOD_ADD,
-                                           'mail',
-                                           ['user0099@dev.null',
-                                            'alias@dev.null'])])
-    except ldap.LDAPError as e:
-        log.fatal('Failed to modify user: ' + str(e))
-        assert False
-
-    #
-    # Remove and re-add some attributes
-    #
-    try:
-        topology.standalone.modify_s(DN, [(ldap.MOD_DELETE,
-                                           'mail',
-                                           'alias@dev.null'),
-                                          (ldap.MOD_DELETE,
-                                           'mail',
-                                           'user0099@dev.null'),
-                                          (ldap.MOD_ADD,
-                                           'mail', 'user0099@dev.null')])
-    except ldap.LDAPError as e:
-        log.fatal('Failed to modify user: ' + str(e))
-        assert False
-
-    #
-    # Search using deleted attribute value - no entries should be returned
-    #
-    try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                             ldap.SCOPE_SUBTREE,
-                                             'mail=alias@dev.null')
-        if entry:
-            log.fatal('Entry incorrectly returned')
-            assert False
-    except ldap.LDAPError as e:
-        log.fatal('Failed to search for user: ' + str(e))
-        assert False
-
-    #
-    # Search using existing attribute value - the entry should be returned
-    #
-    try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                             ldap.SCOPE_SUBTREE,
-                                             'mail=user0099@dev.null')
-        if entry is None:
-            log.fatal('Entry not found, but it should have been')
-            assert False
-    except ldap.LDAPError as e:
-        log.fatal('Failed to search for user: ' + str(e))
-        assert False
-
-    log.info('Test PASSED')
-
-
-if __name__ == '__main__':
-    # Run isolated
-    # -s for DEBUG mode
-    CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tmp/README b/dirsrvtests/tmp/README
deleted file mode 100644
index 0e8f416..0000000
--- a/dirsrvtests/tmp/README
+++ /dev/null
@@ -1,10 +0,0 @@
-TMP DIRECTORY README
-
-This directory is used to store files (LDIFs, etc.) that are created during the ticket script runtime. The script is also responsible for removing any files it places in this directory. This directory can be retrieved via getDir() from the DirSrv class.
-
-Example:
-
-    tmp_dir_path = topology.standalone.getDir(__file__, TMP_DIR)
-
-    new_ldif = tmp_dir_path + "export.ldif"
-
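The README above shows the intended pattern for the tmp directory. A short sketch of how a ticket test might use it for a generated LDIF (the export step itself is elided; getDir and clearTmpDir are the lib389 helpers the deleted tests already rely on):

    import os

    def export_to_tmp(topology):
        """Build an LDIF path under the per-test tmp dir and clean it up afterwards."""
        tmp_dir_path = topology.standalone.getDir(__file__, TMP_DIR)
        export_ldif = os.path.join(tmp_dir_path, "export.ldif")
        # ... run the export that writes export_ldif, then verify its contents ...
        topology.standalone.clearTmpDir(__file__)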