#39 Update Azure Pipelines to run real FreeIPA tests
Merged 3 years ago by abbra. Opened 3 years ago by abbra.
abbra/slapi-nis update-azure-pipeline into master

@@ -0,0 +1,27 @@ 

+ FROM fedora:32

+ MAINTAINER [FreeIPA Developers freeipa-devel@lists.fedorahosted.org]

+ ENV container=docker LANG=en_US.utf8 LANGUAGE=en_US.utf8 LC_ALL=en_US.utf8

+ 

+ ADD dist /root

+ RUN echo 'deltarpm = false' >> /etc/dnf/dnf.conf \

+     && dnf update -y dnf \

+     && sed -i 's/%_install_langs \(.*\)/\0:fr/g' /etc/rpm/macros.image-language-conf \

+     && dnf install -y systemd openssh-server freeipa-server-dns freeipa-server-trust-ad python3-ipatests \

+     && dnf install -y \

+         firewalld \

+         glibc-langpack-fr \

+         glibc-langpack-en \

+         iptables \

+         nss-tools \

+         openssh-server \

+         sudo \

+         wget \

+         /root/packages/*.rpm \

+     && dnf clean all && rm -rf /root/packages /root/srpms \

+     && sed -i 's/.*PermitRootLogin .*/#&/g' /etc/ssh/sshd_config \

+     && echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config \

+     && systemctl enable sshd 

+ 

+ STOPSIGNAL RTMIN+3

+ VOLUME ["/project", "/run", "/tmp"]

+ ENTRYPOINT [ "/usr/sbin/init" ]

@@ -0,0 +1,29 @@ 

+ # replace with 'fedora:rawhide' on fix:

+ # https://bugzilla.redhat.com/show_bug.cgi?id=1869612

+ FROM registry.fedoraproject.org/fedora:rawhide

+ MAINTAINER [FreeIPA Developers freeipa-devel@lists.fedorahosted.org]

+ ENV container=docker LANG=en_US.utf8 LANGUAGE=en_US.utf8 LC_ALL=en_US.utf8

+ 

+ ADD dist /root

+ RUN echo 'deltarpm = false' >> /etc/dnf/dnf.conf \

+     && dnf update -y dnf \

+     && sed -i 's/%_install_langs \(.*\)/\0:fr/g' /etc/rpm/macros.image-language-conf \

+     && dnf install -y systemd \

+     && dnf install -y \

+         firewalld \

+         glibc-langpack-fr \

+         glibc-langpack-en \

+         iptables \

+         nss-tools \

+         openssh-server \

+         sudo \

+         wget \

+         /root/packages/*.rpm \

+     && dnf clean all && rm -rf /root/packages* /root/srpms \

+     && sed -i 's/.*PermitRootLogin .*/#&/g' /etc/ssh/sshd_config \

+     && echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config \

+     && systemctl enable sshd 

+ 

+ STOPSIGNAL RTMIN+3

+ VOLUME ["/project", "/run", "/tmp"]

+ ENTRYPOINT [ "/usr/sbin/init" ]

@@ -0,0 +1,58 @@ 

+ version: '2.1'

+ services:

+   master:

+     image: ${IPA_DOCKER_IMAGE}

+     build: .

+     cap_add:

+     - ALL

+     security_opt:

+     - apparmor:unconfined

+     - seccomp:./seccomp.json

+     mem_limit: 1900m

+     volumes:

+     - /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd

+     - ./ipa-test-config.yaml:/root/.ipa/ipa-test-config.yaml:ro

+     - ${BUILD_REPOSITORY_LOCALPATH}:${PROJECT_TESTS_REPO_PATH}

+ 

+     networks:

+     - ${IPA_NETWORK}

+ 

+   replica:

+     image: ${IPA_DOCKER_IMAGE}

+     build: .

+     cap_add:

+     - ALL

+     security_opt:

+     - apparmor:unconfined

+     - seccomp:./seccomp.json

+     mem_limit: 1900m

+     volumes:

+     - /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd

+     networks:

+     - ${IPA_NETWORK}

+ 

+   client:

+     image: ${IPA_DOCKER_IMAGE}

+     build: .

+     cap_add:

+     - ALL

+     security_opt:

+     - apparmor:unconfined

+     - seccomp:./seccomp.json

+     mem_limit: 536870912

+     volumes:

+     - /sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd

+     # nfs server

+     - ./exports:/exports

+     - /lib/modules:/lib/modules:ro

+     networks:

+     - ${IPA_NETWORK}

+ 

+ networks:

+   ipanet:

+     driver: bridge

+     enable_ipv6: true

+     ipam:

+       driver: default

+       config:

+       - subnet: ${IPA_IPV6_SUBNET}
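
The compose file is parameterized entirely through environment variables; azure-run-tests.sh further down brings an environment up and scales the replica/client services per test definition. A condensed sketch of that invocation (PROJECT_TESTS_REPO_PATH is expected to already be set by the pipeline variables):

    IPA_DOCKER_IMAGE=project-azure-builder \
    IPA_NETWORK=ipanet \
    IPA_IPV6_SUBNET="2001:db8:1:1::/64" \
    BUILD_REPOSITORY_LOCALPATH="$(pwd)" \
    docker-compose -p 1 up \
        --scale replica=1 --scale client=0 \
        --force-recreate --remove-orphans -d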

@@ -0,0 +1,787 @@ 

+ {

+ 	"defaultAction": "SCMP_ACT_ERRNO",

+ 	"archMap": [

+ 		{

+ 			"architecture": "SCMP_ARCH_X86_64",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_X86",

+ 				"SCMP_ARCH_X32"

+ 			]

+ 		},

+ 		{

+ 			"architecture": "SCMP_ARCH_AARCH64",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_ARM"

+ 			]

+ 		},

+ 		{

+ 			"architecture": "SCMP_ARCH_MIPS64",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_MIPS",

+ 				"SCMP_ARCH_MIPS64N32"

+ 			]

+ 		},

+ 		{

+ 			"architecture": "SCMP_ARCH_MIPS64N32",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_MIPS",

+ 				"SCMP_ARCH_MIPS64"

+ 			]

+ 		},

+ 		{

+ 			"architecture": "SCMP_ARCH_MIPSEL64",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_MIPSEL",

+ 				"SCMP_ARCH_MIPSEL64N32"

+ 			]

+ 		},

+ 		{

+ 			"architecture": "SCMP_ARCH_MIPSEL64N32",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_MIPSEL",

+ 				"SCMP_ARCH_MIPSEL64"

+ 			]

+ 		},

+ 		{

+ 			"architecture": "SCMP_ARCH_S390X",

+ 			"subArchitectures": [

+ 				"SCMP_ARCH_S390"

+ 			]

+ 		}

+ 	],

+ 	"syscalls": [

+ 		{

+ 			"names": [

+ 				"accept",

+ 				"accept4",

+ 				"access",

+ 				"adjtimex",

+ 				"alarm",

+ 				"bind",

+ 				"brk",

+ 				"capget",

+ 				"capset",

+ 				"chdir",

+ 				"chmod",

+ 				"chown",

+ 				"chown32",

+ 				"clock_adjtime",

+ 				"clock_getres",

+ 				"clock_gettime",

+ 				"clock_nanosleep",

+ 				"close",

+ 				"connect",

+ 				"copy_file_range",

+ 				"creat",

+ 				"dup",

+ 				"dup2",

+ 				"dup3",

+ 				"epoll_create",

+ 				"epoll_create1",

+ 				"epoll_ctl",

+ 				"epoll_ctl_old",

+ 				"epoll_pwait",

+ 				"epoll_wait",

+ 				"epoll_wait_old",

+ 				"eventfd",

+ 				"eventfd2",

+ 				"execve",

+ 				"execveat",

+ 				"exit",

+ 				"exit_group",

+ 				"faccessat",

+ 				"fadvise64",

+ 				"fadvise64_64",

+ 				"fallocate",

+ 				"fanotify_mark",

+ 				"fchdir",

+ 				"fchmod",

+ 				"fchmodat",

+ 				"fchown",

+ 				"fchown32",

+ 				"fchownat",

+ 				"fcntl",

+ 				"fcntl64",

+ 				"fdatasync",

+ 				"fgetxattr",

+ 				"flistxattr",

+ 				"flock",

+ 				"fork",

+ 				"fremovexattr",

+ 				"fsetxattr",

+ 				"fstat",

+ 				"fstat64",

+ 				"fstatat64",

+ 				"fstatfs",

+ 				"fstatfs64",

+ 				"fsync",

+ 				"ftruncate",

+ 				"ftruncate64",

+ 				"futex",

+ 				"futimesat",

+ 				"getcpu",

+ 				"getcwd",

+ 				"getdents",

+ 				"getdents64",

+ 				"getegid",

+ 				"getegid32",

+ 				"geteuid",

+ 				"geteuid32",

+ 				"getgid",

+ 				"getgid32",

+ 				"getgroups",

+ 				"getgroups32",

+ 				"getitimer",

+ 				"getpeername",

+ 				"getpgid",

+ 				"getpgrp",

+ 				"getpid",

+ 				"getppid",

+ 				"getpriority",

+ 				"getrandom",

+ 				"getresgid",

+ 				"getresgid32",

+ 				"getresuid",

+ 				"getresuid32",

+ 				"getrlimit",

+ 				"get_robust_list",

+ 				"getrusage",

+ 				"getsid",

+ 				"getsockname",

+ 				"getsockopt",

+ 				"get_thread_area",

+ 				"gettid",

+ 				"gettimeofday",

+ 				"getuid",

+ 				"getuid32",

+ 				"getxattr",

+ 				"inotify_add_watch",

+ 				"inotify_init",

+ 				"inotify_init1",

+ 				"inotify_rm_watch",

+ 				"io_cancel",

+ 				"ioctl",

+ 				"io_destroy",

+ 				"io_getevents",

+ 				"ioprio_get",

+ 				"ioprio_set",

+ 				"io_setup",

+ 				"io_submit",

+ 				"ipc",

+ 				"kill",

+ 				"lchown",

+ 				"lchown32",

+ 				"lgetxattr",

+ 				"link",

+ 				"linkat",

+ 				"listen",

+ 				"listxattr",

+ 				"llistxattr",

+ 				"_llseek",

+ 				"lremovexattr",

+ 				"lseek",

+ 				"lsetxattr",

+ 				"lstat",

+ 				"lstat64",

+ 				"madvise",

+ 				"memfd_create",

+ 				"mincore",

+ 				"mkdir",

+ 				"mkdirat",

+ 				"mknod",

+ 				"mknodat",

+ 				"mlock",

+ 				"mlock2",

+ 				"mlockall",

+ 				"mmap",

+ 				"mmap2",

+ 				"mprotect",

+ 				"mq_getsetattr",

+ 				"mq_notify",

+ 				"mq_open",

+ 				"mq_timedreceive",

+ 				"mq_timedsend",

+ 				"mq_unlink",

+ 				"mremap",

+ 				"msgctl",

+ 				"msgget",

+ 				"msgrcv",

+ 				"msgsnd",

+ 				"msync",

+ 				"munlock",

+ 				"munlockall",

+ 				"munmap",

+ 				"nanosleep",

+ 				"newfstatat",

+ 				"_newselect",

+ 				"open",

+ 				"openat",

+ 				"pause",

+ 				"pipe",

+ 				"pipe2",

+ 				"poll",

+ 				"ppoll",

+ 				"prctl",

+ 				"pread64",

+ 				"preadv",

+ 				"preadv2",

+ 				"prlimit64",

+ 				"pselect6",

+ 				"pwrite64",

+ 				"pwritev",

+ 				"pwritev2",

+ 				"read",

+ 				"readahead",

+ 				"readlink",

+ 				"readlinkat",

+ 				"readv",

+ 				"recv",

+ 				"recvfrom",

+ 				"recvmmsg",

+ 				"recvmsg",

+ 				"remap_file_pages",

+ 				"removexattr",

+ 				"rename",

+ 				"renameat",

+ 				"renameat2",

+ 				"restart_syscall",

+ 				"rmdir",

+ 				"rt_sigaction",

+ 				"rt_sigpending",

+ 				"rt_sigprocmask",

+ 				"rt_sigqueueinfo",

+ 				"rt_sigreturn",

+ 				"rt_sigsuspend",

+ 				"rt_sigtimedwait",

+ 				"rt_tgsigqueueinfo",

+ 				"sched_getaffinity",

+ 				"sched_getattr",

+ 				"sched_getparam",

+ 				"sched_get_priority_max",

+ 				"sched_get_priority_min",

+ 				"sched_getscheduler",

+ 				"sched_rr_get_interval",

+ 				"sched_setaffinity",

+ 				"sched_setattr",

+ 				"sched_setparam",

+ 				"sched_setscheduler",

+ 				"sched_yield",

+ 				"seccomp",

+ 				"select",

+ 				"semctl",

+ 				"semget",

+ 				"semop",

+ 				"semtimedop",

+ 				"send",

+ 				"sendfile",

+ 				"sendfile64",

+ 				"sendmmsg",

+ 				"sendmsg",

+ 				"sendto",

+ 				"setfsgid",

+ 				"setfsgid32",

+ 				"setfsuid",

+ 				"setfsuid32",

+ 				"setgid",

+ 				"setgid32",

+ 				"setgroups",

+ 				"setgroups32",

+ 				"setitimer",

+ 				"setpgid",

+ 				"setpriority",

+ 				"setregid",

+ 				"setregid32",

+ 				"setresgid",

+ 				"setresgid32",

+ 				"setresuid",

+ 				"setresuid32",

+ 				"setreuid",

+ 				"setreuid32",

+ 				"setrlimit",

+ 				"set_robust_list",

+ 				"setsid",

+ 				"setsockopt",

+ 				"set_thread_area",

+ 				"set_tid_address",

+ 				"setuid",

+ 				"setuid32",

+ 				"setxattr",

+ 				"shmat",

+ 				"shmctl",

+ 				"shmdt",

+ 				"shmget",

+ 				"shutdown",

+ 				"sigaltstack",

+ 				"signalfd",

+ 				"signalfd4",

+ 				"sigreturn",

+ 				"socket",

+ 				"socketcall",

+ 				"socketpair",

+ 				"splice",

+ 				"stat",

+ 				"stat64",

+ 				"statfs",

+ 				"statfs64",

+ 				"statx",

+ 				"symlink",

+ 				"symlinkat",

+ 				"sync",

+ 				"sync_file_range",

+ 				"syncfs",

+ 				"sysinfo",

+ 				"tee",

+ 				"tgkill",

+ 				"time",

+ 				"timer_create",

+ 				"timer_delete",

+ 				"timerfd_create",

+ 				"timerfd_gettime",

+ 				"timerfd_settime",

+ 				"timer_getoverrun",

+ 				"timer_gettime",

+ 				"timer_settime",

+ 				"times",

+ 				"tkill",

+ 				"truncate",

+ 				"truncate64",

+ 				"ugetrlimit",

+ 				"umask",

+ 				"uname",

+ 				"unlink",

+ 				"unlinkat",

+ 				"utime",

+ 				"utimensat",

+ 				"utimes",

+ 				"vfork",

+ 				"vmsplice",

+ 				"wait4",

+ 				"waitid",

+ 				"waitpid",

+ 				"write",

+ 				"writev",

+ 				"mount",

+ 				"umount2",

+ 				"reboot",

+ 				"name_to_handle_at",

+ 				"unshare"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"personality"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 0,

+ 					"value": 0,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_EQ"

+ 				}

+ 			],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"personality"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 0,

+ 					"value": 8,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_EQ"

+ 				}

+ 			],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"personality"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 0,

+ 					"value": 131072,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_EQ"

+ 				}

+ 			],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"personality"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 0,

+ 					"value": 131080,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_EQ"

+ 				}

+ 			],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"personality"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 0,

+ 					"value": 4294967295,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_EQ"

+ 				}

+ 			],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"sync_file_range2"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"arches": [

+ 					"ppc64le"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"arm_fadvise64_64",

+ 				"arm_sync_file_range",

+ 				"sync_file_range2",

+ 				"breakpoint",

+ 				"cacheflush",

+ 				"set_tls"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"arches": [

+ 					"arm",

+ 					"arm64"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"arch_prctl"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"arches": [

+ 					"amd64",

+ 					"x32"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"modify_ldt"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"arches": [

+ 					"amd64",

+ 					"x32",

+ 					"x86"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"s390_pci_mmio_read",

+ 				"s390_pci_mmio_write",

+ 				"s390_runtime_instr"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"arches": [

+ 					"s390",

+ 					"s390x"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"open_by_handle_at"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_DAC_READ_SEARCH"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"bpf",

+ 				"clone",

+ 				"fanotify_init",

+ 				"lookup_dcookie",

+ 				"mount",

+ 				"name_to_handle_at",

+ 				"perf_event_open",

+ 				"quotactl",

+ 				"setdomainname",

+ 				"sethostname",

+ 				"setns",

+ 				"syslog",

+ 				"umount",

+ 				"umount2",

+ 				"unshare"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_ADMIN"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"clone"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 0,

+ 					"value": 2080505856,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_MASKED_EQ"

+ 				}

+ 			],

+ 			"comment": "",

+ 			"includes": {},

+ 			"excludes": {

+ 				"caps": [

+ 					"CAP_SYS_ADMIN"

+ 				],

+ 				"arches": [

+ 					"s390",

+ 					"s390x"

+ 				]

+ 			}

+ 		},

+ 		{

+ 			"names": [

+ 				"clone"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [

+ 				{

+ 					"index": 1,

+ 					"value": 2080505856,

+ 					"valueTwo": 0,

+ 					"op": "SCMP_CMP_MASKED_EQ"

+ 				}

+ 			],

+ 			"comment": "s390 parameter ordering for clone is different",

+ 			"includes": {

+ 				"arches": [

+ 					"s390",

+ 					"s390x"

+ 				]

+ 			},

+ 			"excludes": {

+ 				"caps": [

+ 					"CAP_SYS_ADMIN"

+ 				]

+ 			}

+ 		},

+ 		{

+ 			"names": [

+ 				"reboot"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_BOOT"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"chroot"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_CHROOT"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"delete_module",

+ 				"init_module",

+ 				"finit_module",

+ 				"query_module"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_MODULE"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"acct"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_PACCT"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"kcmp",

+ 				"process_vm_readv",

+ 				"process_vm_writev",

+ 				"ptrace"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_PTRACE"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"iopl",

+ 				"ioperm"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_RAWIO"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"settimeofday",

+ 				"stime",

+ 				"clock_settime"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_TIME"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"vhangup"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_TTY_CONFIG"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"get_mempolicy",

+ 				"mbind",

+ 				"set_mempolicy"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYS_NICE"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		},

+ 		{

+ 			"names": [

+ 				"syslog"

+ 			],

+ 			"action": "SCMP_ACT_ALLOW",

+ 			"args": [],

+ 			"comment": "",

+ 			"includes": {

+ 				"caps": [

+ 					"CAP_SYSLOG"

+ 				]

+ 			},

+ 			"excludes": {}

+ 		}

+ 	]

+ }
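
This seccomp profile is the one referenced from docker-compose.yml above (security_opt: seccomp:./seccomp.json). It looks like Docker's default profile extended with a few unconditionally allowed calls that systemd and the IPA installers need inside the container (mount, umount2, reboot, name_to_handle_at, unshare). The plain-docker equivalent of what compose applies per service would be roughly:

    # sketch only: compose additionally mounts cgroups and sets mem_limit per service
    docker run -d --cap-add=ALL \
        --security-opt apparmor=unconfined \
        --security-opt seccomp=./seccomp.json \
        project-azure-builder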

@@ -0,0 +1,4 @@ 

+ extends:

+   template: azure-pipelines.yml

+   parameters:

+     VARIABLES_FILE: 'templates/variables-rawhide.yml'

file modified
+67 -57
@@ -1,70 +1,80 @@ 

+ parameters:

+ - name: VARIABLES_FILE

+   default: 'templates/variables.yml'

+ 

+ trigger:

+ - master

+ 

+ variables:

+ - template: templates/variables-common.yml

+ # platform-specific variables, pulled in via the VARIABLES_FILE parameter

+ - template: ${{ parameters.VARIABLES_FILE }}

+ 

  jobs:

  - job: Build

    pool:

-     vmImage: 'Ubuntu-16.04'

-   variables:

-     builddir: /__w/1/s

+     vmImage: $(VM_IMAGE)

    container:

-     image: f30/fedora-toolbox

-     endpoint: fedora-project-registry

+     image: $(DOCKER_BUILD_IMAGE)

+     options: --cap-add=SYS_PTRACE --security-opt seccomp=unconfined --privileged --env container=docker

    steps:

+     - template: templates/${{ variables.PREPARE_BUILD_TEMPLATE }}

      - script: |

-         sudo rm -rf /var/cache/dnf/*

-         cd $(builddir)

-         sudo dnf makecache || :

-         sudo dnf -y install dnf-plugins-core rpm-build autoconf automake libtool \

-                 /usr/bin/rpcgen 389-ds-base-devel libnsl2-devel \

-                 libsss_nss_idmap-devel nspr-devel nss-devel \

-                 openldap-devel pam-devel gdb gcc annobin make

-         sudo dnf builddep -y --spec slapi-nis.spec --best --allowerasing --setopt=install_weak_deps=False

-       displayName: Prepare build environment

-     - script: |

-         mkdir -p $(builddir)/dist

-         make -f .copr/Makefile outdir=$(builddir)/dist SUDO=sudo srpm

-         rpmbuild --rebuild \

-                 --define "_topdir $(builddir)/build" \

-                 --define "_sourcedir $(builddir)/build" \

-                 --define "_specdir $(builddir)/build" \

-                 --define "_builddir $(builddir)/build" \

-                 --define "_srcrpmdir $(builddir)/dist" \

-                 --define "_rpmdir $(builddir)/dist" \

-                 $(builddir)/dist/*.src.rpm

-       displayName: Build packages

-     - script: |

-         mv $(builddir)/dist/*/*.rpm $(builddir)/dist/

-         ls -la $(builddir)/dist

-       displayName: List packages

-     - task: PublishPipelineArtifact@0

-       displayName: Publish packages

-       inputs:

+         set -e

+         echo "Running autoconf generator"

+         ./autogen.sh

+       displayName: Configure the project

+     - template: templates/${{ variables.BUILD_TEMPLATE }}

+     - template: templates/publish-build.yml

+       parameters:

          artifactName: 'packages-$(Build.BuildId)-$(Agent.OS)-$(Agent.OSArchitecture)'

          targetPath: $(Build.Repository.LocalPath)/dist

-       condition: always()

+         displayName: Publish packages

  

- - job: Install_with_FreeIPA_packages

+     - script: |

+         set -e

+         mkdir container

+         cp -pr dist container/

+         cp $(PROJECT_TESTS_DOCKERFILES)/$(DOCKER_DOCKERFILE) container/Dockerfile

+         cd container

+         docker build -t project-azure-builder .

+         docker save project-azure-builder | gzip > '$(builddir)/project-azure-builder-container.tar.gz'

+       displayName: Create container image for test

+     - template: templates/publish-build.yml

+       parameters:

+         artifactName: 'image-$(Build.BuildId)-$(Agent.OS)-$(Agent.OSArchitecture)'

+         targetPath: $(Build.Repository.LocalPath)/project-azure-builder-container.tar.gz

+         displayName: Publish container image

+     - template: templates/generate-matrix.yml

+       parameters:

+         definition: 'tests/azure/azure_definitions/gating.yml'

+         displayName: Generate Matrix for Gating tests

+         name: gating_matrix

+     - template: templates/generate-matrix.yml

+       parameters:

+         definition: 'tests/azure/azure_definitions/base.yml'

+         displayName: Generate Matrix for Base tests

+         name: base_matrix

+ 

+ - job: BASE_XMLRPC

+   pool:

+     vmImage: $(VM_IMAGE)

    dependsOn: Build

    condition: succeeded()

-   pool:

-     vmImage: 'Ubuntu-16.04'

-   variables:

-     builddir: /__w/1/s

-   container:

-     image: f30/fedora-toolbox

-     endpoint: fedora-project-registry

+   strategy:

+     matrix: $[ dependencies.Build.outputs['base_matrix.matrix'] ]

    steps:

-     - checkout: none

-     - task: DownloadPipelineArtifact@0

-       displayName: Download pre-built packages

-       inputs:

-         artifactName: 'packages-$(Build.BuildId)-$(Agent.OS)-$(Agent.OSArchitecture)'

-         targetPath: $(Build.Repository.LocalPath)/dist

-     - script: |

-         rm -f $(builddir)/dist/*.src.rpm

-       displayName: Remove source package

-     - script: |

-         sudo rm -rf /var/cache/dnf/*

-         cd $(builddir)

-         sudo dnf makecache || :

-         sudo dnf -y install freeipa-server-dns freeipa-server $(builddir)/dist/*.rpm 

-       displayName: Attempt package installation

+     - template: templates/generate-job-variables.yml

+     - template: templates/test-jobs.yml

  

+ - job: GATING

+   pool:

+     vmImage: $(VM_IMAGE)

+   dependsOn: Build

+   condition: succeeded()

+   strategy:

+     matrix: $[ dependencies.Build.outputs['gating_matrix.matrix'] ]

+   timeoutInMinutes: 90

+   steps:

+     - template: templates/generate-job-variables.yml

+     - template: templates/test-jobs.yml

@@ -0,0 +1,12 @@ 

+ vms:

+ - vm_jobs:

+   - container_job: base

+     tests:

+     - test_cmdline

+     - test_install

+     type: base

+ 

+   - container_job: xmlrpc

+     tests:

+     - test_xmlrpc/test_netgroup_plugin.py

+     type: base

@@ -0,0 +1,23 @@ 

+ vms:

+ - vm_jobs:

+   - container_job: InstallMaster

+     tests:

+     - test_integration/test_installation.py::TestInstallMaster

+ 

+   - container_job: AD_trust_install

+     containers:

+       replicas: 1

+     tests:

+     - test_integration/test_adtrust_install.py

+ 

+ - vm_jobs:

+   - container_job: simple_replication

+     containers:

+       replicas: 1

+     tests:

+     - test_integration/test_simple_replication.py

+ 

+   - container_job: netgroup

+     tests:

+     - test_integration/test_netgroup.py

+ 
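
These definition files are what the matrix generator (templates/generate-matrix.yml plus generate-matrix.py, both below) turns into the Azure job matrix: each vms entry becomes one agent VM, and each container_job within it becomes one Docker environment on that VM. To preview the resulting matrix locally, something along these lines should work (the script location mirrors $(PROJECT_TESTS_SCRIPTS) and is an assumption, not verbatim from this PR):

    # second argument is the per-VM environment limit, MAX_CONTAINER_ENVS
    python3 tests/azure/scripts/generate-matrix.py \
        tests/azure/azure_definitions/gating.yml 2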

@@ -0,0 +1,114 @@ 

+ #!/bin/bash -eux

+ 

+ # this script is intended to be run within a container

+ #

+ # distro-specifics

+ source "${PROJECT_TESTS_SCRIPTS}/variables.sh"

+ 

+ function collect_logs() {

+     if [ "$#" -ne 1 ]; then

+         printf "collect_logs: The path to output archive is required\n"

+         exit 1

+     fi

+     local out_file="$1"

+     printf "Collecting logs\n"

+     journalctl -b --no-pager > systemd_journal.log

+     tar --ignore-failed-read -czf "$out_file" \

+         /var/log/dirsrv \

+         "$HTTPD_LOGDIR" \

+         /var/log/ipa* \

+         /var/log/krb5kdc.log \

+         /var/log/pki \

+         /var/log/samba \

+         "$BIND_DATADIR" \

+         systemd_journal.log

+ }

+ 

+ server_password=Secret123

+ 

+ echo "Installing FreeIPA master for the domain ${PROJECT_TESTS_DOMAIN} and realm ${PROJECT_TESTS_REALM}"

+ 

+ install_result=1

+ { ipa-server-install -U \

+     --domain "$PROJECT_TESTS_DOMAIN" \

+     --realm "$PROJECT_TESTS_REALM" \

+     -p "$server_password" -a "$server_password" \

+     --setup-dns --setup-kra --auto-forwarders && install_result=0 ; } || \

+     install_result=$?

+ 

+ rm -rf "$PROJECT_TESTS_LOGSDIR"

+ mkdir "$PROJECT_TESTS_LOGSDIR"

+ pushd "$PROJECT_TESTS_LOGSDIR"

+ tests_result=1

+ 

+ if [ "$install_result" -eq 0 ] ; then

+     echo "Run IPA tests"

+     echo "Installation complete. Performance of individual steps:"

+     grep 'service duration:' /var/log/ipaserver-install.log | sed -e 's/DEBUG //g'

+ 

+     sed -ri "s/mode = production/mode = developer/" /etc/ipa/default.conf

+     systemctl restart "$HTTPD_SYSTEMD_NAME"

+     firewalld_cmd --add-service={freeipa-ldap,freeipa-ldaps,dns}

+ 

+     echo ${server_password} | kinit admin && ipa ping

+     mkdir -p ~/.ipa

+     cp -r /etc/ipa/* ~/.ipa/

+     echo ${server_password} > ~/.ipa/.dmpw

+     echo 'wait_for_dns=5' >> ~/.ipa/default.conf

+ 

+     ipa-test-config --help

+     ipa-test-task --help

+     ipa-run-tests --help

+ 

+     { ipa-run-tests \

+         --logging-level=debug \

+         --logfile-dir="$PROJECT_TESTS_LOGSDIR" \

+         --verbose \

+         --with-xunit \

+         '-k not test_dns_soa' \

+         $PROJECT_TESTS_TO_IGNORE \

+         $PROJECT_TESTS_TO_RUN && tests_result=0 ; } || \

+         tests_result=$?

+ else

+     echo "ipa-server-install failed with code ${install_result}, skip IPA tests"

+ fi

+ collect_logs ipaserver_install_logs.tar.gz

+ 

+ echo "Potential Python 3 incompatibilities in the IPA framework:"

+ grep -n -C5 BytesWarning "$HTTPD_ERRORLOG" || echo "Good, none detected"

+ 

+ echo "State of the directory server instance, httpd databases, PKI CA database:"

+ ls -laZ \

+     /etc/dirsrv/slapd-*/ \

+     "${HTTPD_ALIASDIR}/" \

+     /var/lib/ \

+     /etc/pki/pki-tomcat/alias/ \

+   ||:

+ ls -laZ \

+     /var/lib/ipa/certs/ \

+     /var/lib/ipa/passwds/ \

+     /var/lib/ipa/private/ \

+   ||:

+ 

+ echo "Uninstall the server"

+ ipa-server-install --uninstall -U

+ # second uninstall to verify that --uninstall without installation works

+ ipa-server-install --uninstall -U

+ 

+ collect_logs ipaserver_uninstall_logs.tar.gz

+ 

+ if [ "$install_result" -eq 0 ] ; then

+     firewalld_cmd --remove-service={freeipa-ldap,freeipa-ldaps,dns}

+ fi

+ 

+ echo "Report memory statistics"

+ cat /sys/fs/cgroup/memory/memory.memsw.failcnt

+ cat /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes

+ cat /sys/fs/cgroup/memory/memory.memsw.max_usage_in_bytes

+ cat /sys/fs/cgroup/memory/memory.failcnt

+ cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes

+ cat /sys/fs/cgroup/memory/memory.limit_in_bytes

+ cat /proc/sys/vm/swappiness

+ 

+ # Final result depends on the exit code of the ipa-run-tests

+ test "$tests_result" -eq 0 -a "$install_result" -eq 0

@@ -0,0 +1,38 @@ 

+ #!/bin/bash -eux

+ 

+ # this script is intended to be run within a container

+ #

+ # distro-specifics

+ source "${PROJECT_TESTS_SCRIPTS}/variables.sh"

+ 

+ rm -rf "$PROJECT_TESTS_LOGSDIR"

+ mkdir "$PROJECT_TESTS_LOGSDIR"

+ pushd "$PROJECT_TESTS_LOGSDIR"

+ 

+ tests_result=1

+ { IPATEST_YAML_CONFIG=~/.ipa/ipa-test-config.yaml \

+     ipa-run-tests \

+     --logging-level=debug \

+     --logfile-dir="$PROJECT_TESTS_LOGSDIR" \

+     --with-xunit \

+     --verbose \

+     $PROJECT_TESTS_TO_IGNORE \

+     $PROJECT_TESTS_TO_RUN && tests_result=0 ; } || \

+     tests_result=$?

+ 

+ # fix permissions on logs to be readable by Azure's user (vsts)

+ chmod -R o+rX "$PROJECT_TESTS_LOGSDIR"

+ 

+ find "$PROJECT_TESTS_LOGSDIR" -mindepth 1 -maxdepth 1 -not -name '.*' -type d \

+     -exec tar --remove-files -czf {}.tar.gz {} \;

+ 

+ echo "Report memory statistics"

+ cat /sys/fs/cgroup/memory/memory.memsw.failcnt

+ cat /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes

+ cat /sys/fs/cgroup/memory/memory.memsw.max_usage_in_bytes

+ cat /sys/fs/cgroup/memory/memory.failcnt

+ cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes

+ cat /sys/fs/cgroup/memory/memory.limit_in_bytes

+ cat /proc/sys/vm/swappiness

+ 

+ exit $tests_result

@@ -0,0 +1,111 @@ 

+ #!/bin/bash -eux

+ 

+ if [ $# -ne 1 ]; then

+     echo "Docker environment ID is not provided"

+     exit 1

+ fi

+ 

+ PROJECT_ID="$1"

+ BUILD_REPOSITORY_LOCALPATH="${BUILD_REPOSITORY_LOCALPATH:-$(realpath .)}"

+ 

+ PROJECT_TESTS_TO_RUN_VARNAME="PROJECT_TESTS_TO_RUN_${PROJECT_ID}"

+ PROJECT_TESTS_TO_RUN="${!PROJECT_TESTS_TO_RUN_VARNAME:-}"

+ # if no explicit list of tests to run is given, pytest would run all the

+ # discovered tests; treat that as an error for this CI

+ [ -z "$PROJECT_TESTS_TO_RUN" ] && { echo 'Nothing to test'; exit 1; }

+ 

+ PROJECT_TESTS_ENV_NAME_VARNAME="PROJECT_TESTS_ENV_NAME_${PROJECT_ID}"

+ PROJECT_TESTS_ENV_NAME="${!PROJECT_TESTS_ENV_NAME_VARNAME:-}"

+ [ -z "$PROJECT_TESTS_ENV_NAME" ] && \

+     { echo "Project name is not set for project:${PROJECT_ID}"; exit 1 ;}

+ 

+ PROJECT_TESTS_TYPE_VARNAME="PROJECT_TESTS_TYPE_${PROJECT_ID}"

+ PROJECT_TESTS_TYPE="${!PROJECT_TESTS_TYPE_VARNAME:-integration}"

+ 

+ # Normalize spacing and expand the list via brace expansion; tr strips the {} left behind when the list has a single element

+ PROJECT_TESTS_TO_RUN=$(eval "echo {$(echo $PROJECT_TESTS_TO_RUN | sed -e 's/[ \t]+*/,/g')}" | tr -d '{}')

+ 

+ PROJECT_TESTS_TO_IGNORE_VARNAME="PROJECT_TESTS_TO_IGNORE_${PROJECT_ID}"

+ PROJECT_TESTS_TO_IGNORE="${!PROJECT_TESTS_TO_IGNORE_VARNAME:-}"

+ [ -n "$PROJECT_TESTS_TO_IGNORE" ] && \

+ PROJECT_TESTS_TO_IGNORE=$(eval "echo --ignore\ {$(echo $PROJECT_TESTS_TO_IGNORE | sed -e 's/[ \t]+*/,/g')}" | tr -d '{}')

+ 

+ PROJECT_TESTS_CLIENTS_VARNAME="PROJECT_TESTS_CLIENTS_${PROJECT_ID}"

+ PROJECT_TESTS_CLIENTS="${!PROJECT_TESTS_CLIENTS_VARNAME:-0}"

+ 

+ PROJECT_TESTS_REPLICAS_VARNAME="PROJECT_TESTS_REPLICAS_${PROJECT_ID}"

+ PROJECT_TESTS_REPLICAS="${!PROJECT_TESTS_REPLICAS_VARNAME:-0}"

+ 

+ PROJECT_TESTS_CONTROLLER="${PROJECT_ID}_master_1"

+ PROJECT_TESTS_LOGSDIR="${PROJECT_TESTS_REPO_PATH}/ipa_envs/${PROJECT_TESTS_ENV_NAME}/${CI_RUNNER_LOGS_DIR}"

+ 

+ PROJECT_TESTS_DOMAIN="${PROJECT_TESTS_DOMAIN:-ipa.test}"

+ # bash4

+ PROJECT_TESTS_REALM="${PROJECT_TESTS_DOMAIN^^}"

+ 

+ # for base tests only one master is needed, even if replicas or clients were requested

+ if [ "$PROJECT_TESTS_TYPE" == "base" ]; then

+     PROJECT_TESTS_CLIENTS="0"

+     PROJECT_TESTS_REPLICAS="0"

+ fi

+ 

+ project_dir="${PROJECT_TESTS_ENV_WORKING_DIR}/${PROJECT_TESTS_ENV_NAME}"

+ ln -sfr \

+     "${PROJECT_TESTS_DOCKERFILES}/docker-compose.yml" \

+     "$project_dir"/

+ 

+ ln -sfr \

+     "${PROJECT_TESTS_DOCKERFILES}/seccomp.json" \

+     "$project_dir"/

+ 

+ # will be generated later in setup_containers.py

+ touch "${project_dir}"/ipa-test-config.yaml

+ 

+ pushd "$project_dir"

+ 

+ BUILD_REPOSITORY_LOCALPATH="$BUILD_REPOSITORY_LOCALPATH" \

+ IPA_DOCKER_IMAGE="${IPA_DOCKER_IMAGE:-project-azure-builder}" \

+ IPA_NETWORK="${IPA_NETWORK:-ipanet}" \

+ IPA_IPV6_SUBNET="2001:db8:1:${PROJECT_ID}::/64" \

+ docker-compose -p "$PROJECT_ID" up \

+     --scale replica="$PROJECT_TESTS_REPLICAS" \

+     --scale client="$PROJECT_TESTS_CLIENTS" \

+     --force-recreate --remove-orphans -d

+ 

+ popd

+ 

+ PROJECT_TESTS_CLIENTS="$PROJECT_TESTS_CLIENTS" \

+ PROJECT_TESTS_REPLICAS="$PROJECT_TESTS_REPLICAS" \

+ PROJECT_TESTS_ENV_ID="$PROJECT_ID" \

+ PROJECT_TESTS_ENV_WORKING_DIR="$PROJECT_TESTS_ENV_WORKING_DIR" \

+ PROJECT_TESTS_ENV_NAME="$PROJECT_TESTS_ENV_NAME" \

+ IPA_TEST_CONFIG_TEMPLATE="${BUILD_REPOSITORY_LOCALPATH}/tests/azure/templates/ipa-test-config-template.yaml" \

+ PROJECT_TESTS_REPO_PATH="$PROJECT_TESTS_REPO_PATH" \

+ PROJECT_TESTS_DOMAIN="$PROJECT_TESTS_DOMAIN" \

+ python3 setup_containers.py

+ 

+ # path to runner within container

+ tests_runner="${PROJECT_TESTS_REPO_PATH}/${PROJECT_TESTS_SCRIPTS}/azure-run-${PROJECT_TESTS_TYPE}-tests.sh"

+ 

+ tests_result=1

+ { docker exec -t \

+     --env PROJECT_TESTS_SCRIPTS="${PROJECT_TESTS_REPO_PATH}/${PROJECT_TESTS_SCRIPTS}" \

+     --env IPA_PLATFORM="$IPA_PLATFORM" \

+     --env PROJECT_TESTS_DOMAIN="$PROJECT_TESTS_DOMAIN" \

+     --env PROJECT_TESTS_REALM="$PROJECT_TESTS_REALM" \

+     --env PROJECT_TESTS_LOGSDIR="$PROJECT_TESTS_LOGSDIR" \

+     --env PROJECT_TESTS_TO_RUN="$PROJECT_TESTS_TO_RUN" \

+     --env PROJECT_TESTS_TO_IGNORE="$PROJECT_TESTS_TO_IGNORE" \

+     "$PROJECT_TESTS_CONTROLLER" \

+     /bin/bash --noprofile --norc \

+     -eux "$tests_runner" && tests_result=0 ; } || tests_result=$?

+ 

+ pushd "$project_dir"

+ BUILD_REPOSITORY_LOCALPATH="$BUILD_REPOSITORY_LOCALPATH" \

+ IPA_DOCKER_IMAGE="${IPA_DOCKER_IMAGE:-project-azure-builder}" \

+ IPA_NETWORK="${IPA_NETWORK:-ipanet}" \

+ IPA_IPV6_SUBNET="2001:db8:1:${PROJECT_ID}::/64" \

+ docker-compose -p "$PROJECT_ID" down

+ popd

+ 

+ exit $tests_result
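
The per-environment knobs above (PROJECT_TESTS_TO_RUN_<N>, PROJECT_TESTS_ENV_NAME_<N>, replica/client counts, and so on) arrive from the Azure matrix as environment variables and are resolved with bash indirect expansion: the script assembles the variable name from the environment ID and dereferences it with ${!name}. The pattern in isolation:

    # assumes PROJECT_TESTS_TO_RUN_1 was exported by the pipeline matrix
    PROJECT_ID=1
    varname="PROJECT_TESTS_TO_RUN_${PROJECT_ID}"
    echo "${!varname:-}"   # value of PROJECT_TESTS_TO_RUN_1, or empty if unset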

@@ -0,0 +1,54 @@ 

+ #!/bin/bash -eu

+ 

+ PROJECT_TESTS_ENV_WORKING_DIR="${PROJECT_TESTS_REPO_PATH}/ipa_envs"

+ COREDUMPS_DIR="${PROJECT_TESTS_ENV_WORKING_DIR}/${COREDUMPS_SUBDIR}"

+ 

+ since_time="$(cat '/coredumpctl.time.mark' || echo '-1h')"

+ debugger="/debugger.sh"

+ 

+ cat > "$debugger" <<EOF

+ #!/bin/bash -eux

+ 

+ debug_info="\$@"

+ gdb \

+     -ex 'set confirm off' \

+     -ex 'set pagination off' \

+     -ex 'thread apply all bt full' \

+     -ex 'quit' \

+     \$debug_info > "\${CORE_PID}.stacktrace" 2>&1

+ EOF

+ chmod +x "$debugger"

+ 

+ # make sure coredumpctl is installed

+ which coredumpctl

+ coredumpctl \

+     --no-pager --directory="$HOST_JOURNAL" --since="$since_time" list ||:

+ 

+ rm -rvf "$COREDUMPS_DIR" ||:

+ mkdir "$COREDUMPS_DIR"

+ cd "$COREDUMPS_DIR"

+ 

+ pids="$(coredumpctl --no-pager --directory="$HOST_JOURNAL" --since="$since_time" -F COREDUMP_PID || echo '')"

+ for pid in $pids; do

+     # core dump

+     { coredumpctl \

+         --no-pager \

+         --since="$since_time" \

+         --directory="$HOST_JOURNAL" \

+         -o "${pid}.core" dump "$pid" && \

+       tar -czf "${pid}.core.tar.gz" --remove-files "${pid}.core" ; } ||:

+ 

+     # stacktrace

+     { CORE_PID="$pid" \

+         coredumpctl \

+         --no-pager \

+         --since="$since_time" \

+         --directory="$HOST_JOURNAL" \

+         --debugger="$debugger" \

+         debug "$pid" && \

+       tar \

+         -czf "${pid}.stacktrace.tar.gz" \

+         --remove-files "${pid}.stacktrace" ; } ||:

+ done

+ 

+ chmod a+rw -R "$COREDUMPS_DIR"

@@ -0,0 +1,49 @@ 

+ import argparse

+ import json

+ 

+ import yaml

+ 

+ parser = argparse.ArgumentParser(description='Generate Azure jobs matrix.')

+ parser.add_argument('azure_template', help='path to Azure template')

+ 

+ parser.add_argument('max_azure_env_jobs', type=int,

+                     help='maximum number of Docker envs within VM')

+ 

+ args = parser.parse_args()

+ 

+ with open(args.azure_template) as f:

+     data = yaml.safe_load(f)

+     matrix_jobs = {}

+     for vm in data['vms']:

+         vm_jobs = vm['vm_jobs']

+         jobs = {}

+         job_name = ''

+         for job_id, vm_job in enumerate(vm_jobs, 1):

+             if not job_name:

+                 job_name = f'{vm_job["container_job"]}_{job_id}'

+             jobs[f'project_tests_env_name_{job_id}'] = vm_job['container_job']

+             jobs[f'project_tests_to_run_{job_id}'] = ' '.join(vm_job['tests'])

+             jobs[f'project_tests_to_ignore_{job_id}'] = ' '.join(

+                 vm_job.get('ignore', ''))

+             jobs[f'project_tests_type_{job_id}'] = vm_job.get(

+                 'type', 'integration')

+ 

+             containers = vm_job.get('containers')

+             replicas = 0

+             clients = 0

+             if containers:

+                 replicas = containers.get('replicas', 0)

+                 clients = containers.get('clients', 0)

+             jobs[f'project_tests_replicas_{job_id}'] = replicas

+             jobs[f'project_tests_clients_{job_id}'] = clients

+ 

+         if len(vm_jobs) > args.max_azure_env_jobs:

+             raise ValueError(

+                 f"Number of defined jobs:{len(vm_jobs)} within VM:'{job_name}'"

+                 f" is greater than limit:{args.max_azure_env_jobs}")

+         job_name = f'{job_name}_to_{len(vm_jobs)}'

+         if job_name in matrix_jobs:

+             raise ValueError(f"Environment names should be unique:{job_name}")

+         matrix_jobs[job_name] = jobs

+     print("##vso[task.setVariable variable=matrix;isOutput=true]" +

+           json.dumps(matrix_jobs))
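
The ##vso logging command printed on the last line is what the pipeline's `strategy.matrix: $[ dependencies.Build.outputs['...matrix.matrix'] ]` expression consumes, and Azure then exposes each entry's keys to the matrixed job as upper-cased environment variables (project_tests_to_run_1 becomes PROJECT_TESTS_TO_RUN_1, which is what the shell scripts read). A hypothetical output for a definition with one VM carrying a single base job, with illustrative values:

    ##vso[task.setVariable variable=matrix;isOutput=true]{"base_1_to_1": {"project_tests_env_name_1": "base", "project_tests_to_run_1": "test_cmdline test_install", "project_tests_to_ignore_1": "", "project_tests_type_1": "base", "project_tests_replicas_1": 0, "project_tests_clients_1": 0}}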

@@ -0,0 +1,20 @@ 

+ #!/bin/bash -eu

+ 

+ function install_debuginfo() {

+     dnf makecache ||:

+     dnf install -y \

+         ${PROJECT_TESTS_REPO_PATH}/packages/rpms_debuginfo/*.rpm \

+         gdb

+ 

+     dnf debuginfo-install -y \

+         389-ds-base \

+         bind \

+         bind-dyndb-ldap \

+         certmonger \

+         gssproxy \

+         httpd \

+         krb5-server \

+         krb5-workstation \

+         samba \

+         sssd

+ }

@@ -0,0 +1,8 @@ 

+ #!/bin/bash -eu

+ 

+ function install_debuginfo() { :; }

+ 

+ # override install_debuginfo for the platform specifics

+ source "${PROJECT_TESTS_SCRIPTS}/install-debuginfo-${IPA_PLATFORM}.sh"

+ 

+ install_debuginfo

@@ -0,0 +1,324 @@ 

+ import logging

+ import os

+ import subprocess

+ 

+ import docker

+ from jinja2 import Template

+ 

+ logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

+ 

+ PROJECT_TESTS_ENV_WORKING_DIR = os.environ.get('PROJECT_TESTS_ENV_WORKING_DIR')

+ PROJECT_TESTS_ENV_NAME = os.environ.get('PROJECT_TESTS_ENV_NAME')

+ PROJECT_TESTS_ENV_ID = os.environ.get('PROJECT_TESTS_ENV_ID', '1')

+ PROJECT_TESTS_CLIENTS = int(os.environ.get('PROJECT_TESTS_CLIENTS', 0))

+ PROJECT_TESTS_REPLICAS = int(os.environ.get('PROJECT_TESTS_REPLICAS', 0))

+ PROJECT_TESTS_DOMAIN = os.environ.get('PROJECT_TESTS_DOMAIN', 'ipa.test')

+ IPA_SSH_PRIV_KEY = os.environ.get('IPA_SSH_PRIV_KEY', '/root/.ssh/id_rsa')

+ IPA_DNS_FORWARDER = os.environ.get('IPA_DNS_FORWARDER', '8.8.8.8')

+ IPA_NETWORK = os.environ.get('IPA_NETWORK', 'ipanet')

+ IPA_CONTROLLER_TYPE = os.environ.get('IPA_CONTROLLER_TYPE', 'master')

+ IPA_TEST_CONFIG_TEMPLATE = os.environ.get(

+     'IPA_TEST_CONFIG_TEMPLATE', './templates/ipa-test-config-template.yaml')

+ 

+ PROJECT_TESTS_ENV_DIR = os.path.join(PROJECT_TESTS_ENV_WORKING_DIR, PROJECT_TESTS_ENV_NAME)

+ IPA_TEST_CONFIG = "ipa-test-config.yaml"

+ 

+ 

+ class Container:

+     """

+     Represents a group of Docker containers

+     """

+     def __init__(self, role, dns=IPA_DNS_FORWARDER, num=1,

+                  prefix=PROJECT_TESTS_ENV_ID, domain=PROJECT_TESTS_DOMAIN):

+         self.role = role

+         self.num = num

+         self.prefix = prefix

+         self.dns = dns

+         self.domain = domain

+         self.dclient = docker.from_env()

+ 

+     @property

+     def hostnames(self):

+         """

+         hostnames of containers within group

+         """

+         if not hasattr(self, '_hostnames'):

+             self._hostnames = ['{}{}.{}'.format(self.role, c, self.domain)

+                                for c in range(1, self.num + 1)]

+         return self._hostnames

+ 

+     @property

+     def names(self):

+         """

+         names of containers within group

+         """

+         if not hasattr(self, '_names'):

+             self._names = ['{}_{}_{}'.format(self.prefix, self.role, c)

+                            for c in range(1, self.num + 1)]

+         return self._names

+ 

+     def ip(self, name):

+         """

+         ipv4 address of container

+         """

+         ipanet = '{}_{}'.format(PROJECT_TESTS_ENV_ID, IPA_NETWORK)

+         dcont = self.dclient.containers.get(name)

+         return dcont.attrs['NetworkSettings']['Networks'][ipanet]['IPAddress']

+ 

+     @property

+     def ips(self):

+         """

+         ipv4 addresses of containers within group

+         """

+         if not hasattr(self, '_ips'):

+             self._ips = [self.ip(n) for n in self.names]

+         return self._ips

+ 

+     def umount_docker_resource(self, path):

+         """

+         Umount resource by its path

+         """

+         cmd = [

+             "/bin/umount", path

+         ]

+         self.execute_all(cmd)

+ 

+         cmd = [

+             "/bin/chmod",

+             "a-x",

+             path,

+         ]

+         self.execute_all(cmd)

+ 

+     def execute(self, name, args):

+         """

+         Exec an arbitrary command within container

+         """

+         dcont = self.dclient.containers.get(name)

+         logging.info("%s: run: %s", dcont.name, args)

+         result = dcont.exec_run(args, demux=True)

+         if result.output[0] is not None:

+             logging.info("%s: %s", dcont.name, result.output[0])

+         logging.info("%s: result: %s", dcont.name, result.exit_code)

+         if result.exit_code:

+             logging.error("stderr: %s", result.output[1].decode())

+             raise subprocess.CalledProcessError(

+                 result.exit_code, args,

+                 result.output[1]

+             )

+         return result

+ 

+     def execute_all(self, args):

+         """

+         Exec an arbitrary command within every container of group

+         """

+         results = []

+         for n in self.names:

+             results.append(self.execute(n, args))

+         return results

+ 

+     def add_ssh_pubkey(self, key):

+         """

+         Add ssh public key into every container of group

+         """

+         home_ssh_dir = "/root/.ssh"

+         auth_keys = os.path.join(home_ssh_dir, "authorized_keys")

+         cmd = [

+             "/bin/bash", "-c",

+             (f"mkdir {home_ssh_dir} "

+              f"; chmod 0700 {home_ssh_dir} "

+              f"&& touch {auth_keys} "

+              f"&& chmod 0600 {auth_keys} "

+              f"&& echo {key} >> {auth_keys}"

+              )

+         ]

+         self.execute_all(cmd)

+ 

+     def setup_hosts(self):

+         """

+         Overwrite hosts within every container of group

+         """

+         self.umount_docker_resource("/etc/hosts")

+         for n, i, h in zip(self.names, self.ips, self.hostnames):

+             hosts = "127.0.0.1 localhost\n::1 localhost\n{ip} {host}".format(

+                 ip=i, host=h,

+             )

+             cmd = [

+                 "/bin/bash", "-c",

+                 "echo -e '{hosts}' > /etc/hosts".format(hosts=hosts),

+             ]

+             self.execute(name=n, args=cmd)

+ 

+     def setup_hostname(self):

+         self.umount_docker_resource("/etc/hostname")

+         for n, h in zip(self.names, self.hostnames):

+             cmd = [

+                 "/bin/bash", "-c",

+                 "echo -e '{hostname}' > /etc/hostname".format(hostname=h),

+             ]

+             self.execute(name=n, args=cmd)

+ 

+             cmd = [

+                 "hostnamectl",

+                 "set-hostname", h,

+             ]

+             self.execute(name=n, args=cmd)

+ 

+     def setup_resolvconf(self):

+         """

+         Overwrite resolv conf within every container of group

+         """

+         self.umount_docker_resource("/etc/resolv.conf")

+         ns = "nameserver {dns}".format(dns=self.dns)

+         cmd = [

+             "/bin/bash", "-c",

+             "echo {ns} > /etc/resolv.conf".format(ns=ns),

+         ]

+         self.execute_all(cmd)

+ 

+     def ignore_service_in_container(self, service):

+         """

+         Amend systemd service configuration to be ignored in a container

+         """

+         service_dir = os.path.join(

+             "/etc/systemd/system", "{}.service.d".format(service))

+         override_file = os.path.join(service_dir, "ipa-override.conf")

+         cmds = [

+             "/bin/bash", "-c",

+             (f"mkdir -p {service_dir};"

+              f"echo '[Unit]' > {override_file};"

+              f"echo 'ConditionVirtualization=!container' >> {override_file}")

+         ]

+         self.execute_all(args=cmds)

+ 

+     def setup_container_overrides(self):

+         """

+         Set services known to not work in containers to be ignored

+         """

+         for service in ['nis-domainname',]:

+             self.ignore_service_in_container(service)

+ 

+         self.execute_all(args=["systemctl", "daemon-reload"])

+ 

+ 

+ class Controller(Container):

+     """

+     Manages groups of containers

+     """

+     def __init__(self, contr_type=IPA_CONTROLLER_TYPE):

+         self.containers = []

+         self.contr_type = contr_type

+         if self.contr_type == 'master':

+             self.master = None

+ 

+     def append(self, container):

+         self.containers.append(container)

+ 

+     def setup_ssh(self):

+         """

+         Generate ssh key pair and copy public part to all containers

+         """

+         cmd = ["rm", "-f", IPA_SSH_PRIV_KEY]

+         self.execute(args=cmd)

+ 

+         cmd = [

+             "ssh-keygen", "-q",

+             "-f", IPA_SSH_PRIV_KEY,

+             "-t", "rsa",

+             "-m", "PEM",

+             "-N", "",

+         ]

+         self.execute(args=cmd)

+ 

+         cmd = ["/bin/bash", "-c", "cat {}.pub".format(IPA_SSH_PRIV_KEY)]

+         key = self.execute(cmd).output[0].decode().rstrip()

+         for container in self.containers:

+             container.add_ssh_pubkey(key)

+ 

+     def execute(self, args):

+         """

+         Execute a command on controller (either master or local machine)

+         """

+         if self.contr_type == 'master':

+             if self.master is None:

+                 for container in self.containers:

+                     if container.role == "master":

+                         self.master = container

+                         break

+             return self.master.execute(name=self.master.names[0], args=args)

+ 

+         proc = subprocess.run(args, check=True, capture_output=True)

+         return [proc.stdout.decode().rstrip().strip("'")]

+ 

+     def setup_hosts(self):

+         """

+         Overwrite Docker's hosts

+         """

+         hosts = []

+         for container in self.containers:

+             container.setup_hosts()

+             for i, h in zip(container.ips, container.hostnames):

+                 hosts.append("{} {}".format(i, h))

+ 

+         cmd = [

+             "/bin/bash", "-c",

+             "echo -e '{hosts}' >> /etc/hosts".format(hosts='\n'.join(hosts)),

+         ]

+         self.execute(cmd)

+ 

+     def setup_hostname(self):

+         """

+         Overwrite Docker's hostname

+         """

+         for container in self.containers:

+             container.setup_hostname()

+ 

+     def setup_resolvconf(self):

+         """

+         Overwrite Docker's embedded DNS ns

+         """

+         for container in self.containers:

+             container.setup_resolvconf()

+ 

+     def generate_ipa_test_config(self, config):

+         with open(IPA_TEST_CONFIG_TEMPLATE, 'r') as f:

+             template = Template(f.read(), trim_blocks=True, lstrip_blocks=True)

+ 

+         print(template.render(config))

+ 

+         with open(os.path.join(PROJECT_TESTS_ENV_DIR, IPA_TEST_CONFIG), 'w') as f:

+             f.write(template.render(config))

+ 

+     def setup_container_overrides(self):

+         """

+         Override services known to not work in containers

+         """

+         for container in self.containers:

+             container.setup_container_overrides()

+ 

+ 

+ controller = Controller()

+ master = Container(role='master')

+ clients = Container(role='client', num=PROJECT_TESTS_CLIENTS, dns=master.ips[0])

+ replicas = Container(role='replica', num=PROJECT_TESTS_REPLICAS, dns=master.ips[0])

+ 

+ controller.append(master)

+ controller.append(clients)

+ controller.append(replicas)

+ 

+ controller.setup_ssh()

+ controller.setup_hosts()

+ controller.setup_hostname()

+ controller.setup_resolvconf()

+ controller.setup_container_overrides()

+ 

+ config = {

+     'dns_forwarder': IPA_DNS_FORWARDER,

+     'ssh_private_key': IPA_SSH_PRIV_KEY,

+     'domain_name': PROJECT_TESTS_DOMAIN,

+     'master': master.ips,

+     'replicas': replicas.ips,

+     'clients': clients.ips,

+ }

+ controller.generate_ipa_test_config(config)

@@ -0,0 +1,7 @@ 

+ #!/bin/bash -eux

+ 

+ # Put the platform-specific definitions here

+ 

+ function firewalld_cmd() {

+     firewall-cmd $@

+ }

@@ -0,0 +1,14 @@ 

+ #!/bin/bash -eu

+ 

+ HTTPD_SYSTEMD_NAME='httpd.service'

+ HTTPD_LOGDIR='/var/log/httpd'

+ HTTPD_ERRORLOG="${HTTPD_LOGDIR}/error_log"

+ HTTPD_BASEDIR='/etc/httpd'

+ HTTPD_ALIASDIR="${HTTPD_BASEDIR}/alias"

+ BIND_BASEDIR='/var/named'

+ BIND_DATADIR="${BIND_BASEDIR}/data"

+ 

+ function firewalld_cmd() { :; }

+ 

+ # this should be the last to override base variables with platform specific

+ source "$PROJECT_TESTS_SCRIPTS/variables-${IPA_PLATFORM}.sh"

@@ -0,0 +1,28 @@ 

+ steps:

+ - script: |

+     set -e

+     echo "Running make target 'srpm'"

+     mkdir -p $(builddir)/{distx/{rpms,srpms},buildx}

+     make -f .copr/Makefile outdir=$(builddir)/buildx SUDO=sudo srpm

+     rpmbuild --rebuild \

+              --define "_topdir $(builddir)/buildx" \

+              --define "_sourcedir $(builddir)/buildx" \

+              --define "_specdir $(builddir)/buildx" \

+              --define "_builddir $(builddir)/buildx" \

+              --define "_srcrpmdir $(builddir)/distx/srpms" \

+              --define "_rpmdir $(builddir)/distx/rpms" \

+             $(builddir)/buildx/*.src.rpm

+     rm -rf $(builddir)/buildx

+   displayName: Build packages

+ - script: |

+     set -e

+     mkdir -p $(builddir)/distx/packages{,_debuginfo}

+     find $(builddir)/distx/rpms/ -type f \

+         \( -name "*-debuginfo-*.rpm" -o -name '*-debugsource-*.rpm' \) \

+         -exec mv {} $(builddir)/distx/packages_debuginfo/ \;

+     find $(builddir)/distx/rpms/ -type f \

+         \( -name "*.rpm" \) \

+         -exec mv {} $(builddir)/distx/packages/ \;

+     rm -rf $(builddir)/distx/rpms

+     mv $(builddir)/distx $(builddir)/dist

+   displayName: Move packages into the final location

@@ -0,0 +1,19 @@ 

+ steps:

+ - script: |

+     # don't set 'set -x' here because this breaks variables

+     # https://github.com/microsoft/azure-pipelines-yaml/blob/master/design/readonly-variables.md

+     set -eu

+     total_envs=0

+     for project in $(seq $(MAX_CONTAINER_ENVS)); do

+         # no more configured environments

+         tests_varname="PROJECT_TESTS_TO_RUN_${project}"

+         [ -z "${!tests_varname:-}" ] && break;

+         let "total_envs=total_envs+1"

+ 

+         name_varname="PROJECT_TESTS_ENV_NAME_${project}"

+         [ -z "${!name_varname:-}" ] && \

+             { echo "project_tests_env_name_${project} is mandatory."; exit 1; }

+     done

+     [ "$total_envs" -eq 0 ] && { echo 'Nothing to test'; env | sort ; exit 1; }

+     echo "##vso[task.setvariable variable=project_tests_total_envs]$total_envs"

+   displayName: Generate environment variables

@@ -0,0 +1,9 @@ 

+ parameters:

+     definition: ''

+     displayName: ''

+     name: ''

+ 

+ steps:

+   - script: python3 $(PROJECT_TESTS_SCRIPTS)/generate-matrix.py ${{ parameters.definition }} $(MAX_CONTAINER_ENVS)

+     name: ${{ parameters.name }}

+     displayName: ${{ parameters.displayName }}

@@ -0,0 +1,28 @@ 

+ admin_name: admin

+ admin_password: Secret123

+ debug: false

+ dirman_dn: cn=Directory Manager

+ dirman_password: Secret123

+ domain_level: 1

+ dns_forwarder: {{ dns_forwarder }}

+ root_ssh_key_filename: {{ ssh_private_key }}

+ domains:

+ - name: {{ domain_name }}

+   type: IPA

+   hosts:

+   - external_hostname: master1.{{ domain_name }}

+     name: master1.{{ domain_name }}

+     ip: {{ master[0] }}

+     role: master

+ {% for repl_ip in replicas: %}

+   - external_hostname: replica{{ loop.index }}.{{ domain_name }}

+     name: replica{{ loop.index }}.{{ domain_name }}

+     ip: {{ repl_ip }}

+     role: replica

+ {% endfor %}

+ {% for client_ip in clients: %}

+   - external_hostname: client{{ loop.index }}.{{ domain_name }}

+     name: client{{ loop.index }}.{{ domain_name }}

+     ip: {{ client_ip }}

+     role: client

+ {% endfor %}

@@ -0,0 +1,24 @@ 

+ steps:

+ - script: |

+     set -e

+     sudo rm -rf /var/cache/dnf/*

+     sudo dnf makecache || :

+     echo "Installing base development environment"

+     sudo dnf install -y \

+         'dnf-command(builddep)' \

+         gdb-minimal \

+         make \

+         autoconf \

+         rpm-build \

+         gettext-devel \

+         automake \

+         libtool \

+         docker \

+         python3-paramiko \

+         python3-pyyaml \

+         /usr/bin/rpcgen 389-ds-base-devel libnsl2-devel \

+         libsss_nss_idmap-devel nspr-devel nss-devel \

+         openldap-devel pam-devel gcc annobin

+     echo "Installing slapi-nis development dependencies"

+     sudo dnf builddep -y --spec slapi-nis.spec --best --allowerasing --setopt=install_weak_deps=False

+   displayName: Prepare build environment 

\ No newline at end of file

@@ -0,0 +1,11 @@ 

+ parameters:

+   artifactName: ''

+   targetPath: ''

+   displayName: ''

+ 

+ steps:

+ - task: PublishPipelineArtifact@1

+   inputs:

+     artifactName: ${{ parameters.artifactName }}

+     targetPath: ${{ parameters.targetPath }}

+   displayName: ${{ parameters.displayName }}

@@ -0,0 +1,58 @@ 

+ steps:

+ - script: |

+     set -eux

+ 

+     workdir="$PROJECT_TESTS_ENV_WORKING_DIR"

+     rm -rf "$workdir"

+     mkdir "$workdir"

+ 

+     ln -sfr \

+         ${BUILD_REPOSITORY_LOCALPATH}/${PROJECT_TESTS_SCRIPTS}/{azure-run-tests.sh,setup_containers.py} \

+         ./

+ 

+     function runner() {

+         set -o pipefail

+         local project_id="$1"

+         local project_name_varname="PROJECT_TESTS_ENV_NAME_${project_id}"

+         local project_name="${!project_name_varname}"

+         [ -z "$project_name" ] && \

+             { echo "Project name is not set for project:${project_id}"; exit 1 ;}

+         local workdir="$PROJECT_TESTS_ENV_WORKING_DIR"

+         local logfile="runner_${project_name}.log"

+         local project_dir="${workdir}/${project_name}"

+         rm -rf "$project_dir"

+         mkdir "$project_dir"

+         # live-logging of tests within environment: '1'

+         if [ "$project_id" == "1" ]; then

+             /usr/bin/time \

+                 --format="tests: ${project_name}, result: %x, time: %E" \

+                 --output="result_${project_id}" \

+                 -- \

+                 ./azure-run-tests.sh "$project_id" 2>&1 | \

+                 ts '[%Y-%m-%d %H:%M:%S]' 2>&1 | tee "${project_dir}/${logfile}"

+             result=$?

+         else

+             /usr/bin/time \

+                 --format="tests: ${project_name}, result: %x, time: %E" \

+                 --output="result_${project_id}" \

+                 -- \

+                 ./azure-run-tests.sh "$project_id" 2>&1 | \

+                 ts '[%Y-%m-%d %H:%M:%S]' 2>&1 > "${project_dir}/${logfile}"

+             result=$?

+         fi

+         exit $result

+     }

+     export -f runner

+ 

+     result=1

+     rm -f result_*

+     { parallel \

+         --tag \

+         --jobs $(MAX_CONTAINER_ENVS) \

+         --linebuffer \

+         'runner {}' ::: "$(seq $(project_tests_total_envs))" && result=0 ; } || \

+             result=$?

+     echo "Results:"

+     cat $(eval echo result_{1..$(project_tests_total_envs)})

+     exit $result

+   displayName: Run tests
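
The runner above wraps ./azure-run-tests.sh in /usr/bin/time so each environment leaves a result_<id> file carrying its exit status, and GNU parallel fans the environments out with concurrency capped at MAX_CONTAINER_ENVS (5 in variables-common.yml below). The export -f plus parallel pattern can be confusing on first read; here is a stripped-down, self-contained sketch of the same mechanism, with a dummy sleep payload in place of azure-run-tests.sh and a hard-coded job count (illustrative only):

    - script: |
        set -eu
        function runner() {
            local id="$1"
            # stand-in for ./azure-run-tests.sh "$id"
            /usr/bin/time --format="env ${id}: result %x, time %E" \
                --output="result_${id}" -- sleep "$id"
        }
        export -f runner   # make the function visible to the shells that parallel spawns
        parallel --tag --jobs 2 --linebuffer 'runner {}' ::: 1 2 3
        cat result_1 result_2 result_3
      displayName: Parallel runner sketch (illustrative)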

@@ -0,0 +1,12 @@ 

+ parameters:

+   logsPath: $(PROJECT_TESTS_ENV_WORKING_DIR)

+   logsArtifact: ''

+ steps:

+ - task: PublishPipelineArtifact@1

+   displayName: Publish logs

+   inputs:

+     artifactName: ${{parameters.logsArtifact}}

+     # file selection is controlled by the .artifactignore generated under PROJECT_TESTS_ENV_WORKING_DIR

+     targetPath: ${{parameters.logsPath}}

+   condition: always()

+ 

@@ -0,0 +1,37 @@ 

+ parameters:

+   imageName: 'project-azure-builder:latest'

+ 

+ steps:

+ - script: |

+     set -e

+     echo '{ "ipv6": true, "fixed-cidr-v6": "2001:db8::/64" }' > docker-daemon.json

+     sudo mkdir -p /etc/docker

+     sudo cp docker-daemon.json /etc/docker/daemon.json

+     sudo chown root:root /etc/docker/daemon.json

+     sudo systemctl restart docker

+     sudo modprobe ip6_tables

+   displayName: Configure containerization to allow IPv6 network

+ 

+ - script: |

+     set -e

+     sudo modprobe {nfs,nfsd}

+   displayName: Configure NFS to allow NFS server/client within containers

+ 

+ - task: DownloadPipelineArtifact@0

+   displayName: Download prebuilt packages

+   inputs:

+     artifactName: 'packages-$(Build.BuildId)-$(Agent.OS)-$(Agent.OSArchitecture)'

+     targetPath: $(Build.Repository.LocalPath)/dist

+ 

+ - task: DownloadPipelineArtifact@0

+   displayName: Download pre-built container

+   inputs:

+     artifactName: 'image-$(Build.BuildId)-$(Agent.OS)-$(Agent.OSArchitecture)'

+     targetPath: $(Build.Repository.LocalPath)

+ 

+ - script: |

+     set -e

+     docker load --input $(Build.Repository.LocalPath)/project-azure-builder-container.tar.gz

+     docker images

+     docker inspect project-azure-builder:latest

+   displayName: Import pre-built container to the engine
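
The docker load step above consumes project-azure-builder-container.tar.gz from the image artifact; the matching export side is not part of this excerpt, but in the build job it presumably boils down to docker build plus docker save, roughly as below (illustrative step; variable names taken from the templates further down):

    - script: |
        set -e
        docker build \
            -t project-azure-builder:latest \
            -f $(PROJECT_TESTS_DOCKERFILES)/$(DOCKER_DOCKERFILE) \
            $(Build.Repository.LocalPath)
        docker save project-azure-builder:latest | gzip \
            > $(Build.Repository.LocalPath)/project-azure-builder-container.tar.gz
      displayName: Build and export the test container (illustrative)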

@@ -0,0 +1,127 @@ 

+ steps:

+ - script: |

+     set -e

+     env | sort

+   displayName: Print Host Environment

+ 

+ - script: |

+     set -e

+     sudo apt-get update

+     sudo apt-get install -y \

+         parallel \

+         moreutils \

+         rng-tools \

+         systemd-coredump \

+         python3-docker

+     # Ubuntu's python3-docker is too old (different API), so install the newer pip module as well

+     python3 -m pip install docker --user

+   displayName: Install host test requirements

+ 

+ - script: |

+     set -e

+     printf "Available entropy: %s\n" $(cat /proc/sys/kernel/random/entropy_avail)

+     sudo service rng-tools start

+     sleep 3

+     printf "Available entropy: %s\n" $(cat /proc/sys/kernel/random/entropy_avail)

+   displayName: Increase entropy level

+ 

+ - script: |

+     set -eu

+     date +'%Y-%m-%d %H:%M:%S' > coredumpctl.time.mark

+     systemd_conf="/etc/systemd/system.conf"

+     sudo sed -i 's/^DumpCore=.*/#&/g' "$systemd_conf"

+     sudo sed -i 's/^DefaultLimitCORE=.*/#&/g' "$systemd_conf"

+     echo -e 'DumpCore=yes\nDefaultLimitCORE=infinity' | \

+         sudo tee -a "$systemd_conf" >/dev/null

+     cat "$systemd_conf"

+     coredump_conf="/etc/systemd/coredump.conf"

+     cat "$coredump_conf"

+     sudo systemctl daemon-reexec

+     # for ns-slapd debugging

+     sudo sysctl -w fs.suid_dumpable=1

+   displayName: Allow coredumps

+ 

+ - template: setup-test-environment.yml

+ 

+ - template: run-test.yml

+ 

+ - script: |

+     set -eux

+     free -m

+     cat /sys/fs/cgroup/memory/memory.memsw.max_usage_in_bytes

+     cat /sys/fs/cgroup/memory/memory.max_usage_in_bytes

+     cat /proc/sys/vm/swappiness

+   condition: succeededOrFailed()

+   displayName: Host's memory statistics

+ 

+ - task: PublishTestResults@2

+   inputs:

+     testResultsFiles: 'ipa_envs/*/$(CI_RUNNER_LOGS_DIR)/nosetests.xml'

+     testRunTitle: $(System.JobIdentifier) results

+   condition: succeededOrFailed()

+ 

+ - script: |

+     set -eu

+     # check the host first, containers cores were dumped here

+     COREDUMPS_SUBDIR="coredumps"

+     COREDUMPS_DIR="${PROJECT_TESTS_ENV_WORKING_DIR}/${COREDUMPS_SUBDIR}"

+     rm -rfv "$COREDUMPS_DIR" ||:

+     mkdir "$COREDUMPS_DIR"

+     since_time="$(cat coredumpctl.time.mark || echo '-1h')"

+     sudo coredumpctl --no-pager --since="$since_time" list ||:

+ 

+     pids="$(sudo coredumpctl --no-pager --since="$since_time" -F COREDUMP_PID || echo '')"

+     # nothing to dump

+     [ -z "$pids" ] && exit 0

+ 

+     # continue in container

+     HOST_JOURNAL="/var/log/host_journal"

+     CONTAINER_COREDUMP="dump_cores"

+     docker create --privileged \

+         -v "$(realpath coredumpctl.time.mark)":/coredumpctl.time.mark:ro \

+         -v /var/lib/systemd/coredump:/var/lib/systemd/coredump:ro \

+         -v /var/log/journal:"$HOST_JOURNAL":ro \

+         -v "${BUILD_REPOSITORY_LOCALPATH}":"${PROJECT_TESTS_REPO_PATH}" \

+         --name "$CONTAINER_COREDUMP" project-azure-builder

+     docker start "$CONTAINER_COREDUMP"

+ 

+     docker exec -t \

+         --env PROJECT_TESTS_REPO_PATH="${PROJECT_TESTS_REPO_PATH}" \

+         --env PROJECT_TESTS_SCRIPTS="${PROJECT_TESTS_REPO_PATH}/${PROJECT_TESTS_SCRIPTS}" \

+         --env IPA_PLATFORM="${IPA_PLATFORM}" \

+         "$CONTAINER_COREDUMP" \

+         /bin/bash --noprofile --norc -eux \

+             "${PROJECT_TESTS_REPO_PATH}/${PROJECT_TESTS_SCRIPTS}/install-debuginfo.sh"

+ 

+     docker exec -t \

+         --env PROJECT_TESTS_REPO_PATH="${PROJECT_TESTS_REPO_PATH}" \

+         --env COREDUMPS_SUBDIR="$COREDUMPS_SUBDIR" \

+         --env HOST_JOURNAL="$HOST_JOURNAL" \

+         "$CONTAINER_COREDUMP" \

+         /bin/bash --noprofile --norc -eux \

+             "${PROJECT_TESTS_REPO_PATH}/${PROJECT_TESTS_SCRIPTS}/dump_cores.sh"

+     # coredumps were collected above, so fail the step: there should be no crashes

+     exit 1

+   condition: succeededOrFailed()

+   displayName: Check for coredumps

+ 

+ - script: |

+     set -e

+ 

+     artifacts_ignore_path="${PROJECT_TESTS_ENV_WORKING_DIR}/.artifactignore"

+     cat > "$artifacts_ignore_path" <<EOF

+     **/*

+     !coredumps/*.core.tar.gz

+     !coredumps/*.stacktrace.tar.gz

+     !*/logs/**

+     !*/*.yml

+     !*/*.yaml

+     !*/*.log

+     EOF

+     cat "$artifacts_ignore_path"

+   condition: succeededOrFailed()

+   displayName: Generate .artifactignore file

+ 

+ - template: save-test-artifacts.yml

+   parameters:

+     logsArtifact: logs-$(System.JobIdentifier)-$(Build.BuildId)-$(System.StageAttempt)-$(System.PhaseAttempt)-$(System.JobPositionInPhase)-$(Agent.OS)-$(Agent.OSArchitecture)
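
test-jobs.yml stitches the whole test phase together: host preparation, setup-test-environment.yml, run-test.yml, memory statistics, test-result publishing, coredump collection and log upload. It only defines steps, so the pipeline has to wrap it in a job; that wiring lives in azure-pipelines.yml (changed by this PR but not shown here) and presumably looks roughly like this illustrative fragment:

    jobs:
    - job: Tests
      pool:
        vmImage: $(VM_IMAGE)
      steps:
      - template: templates/test-jobs.yml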

@@ -0,0 +1,14 @@ 

+ variables:

+   CI_RUNNER_LOGS_DIR: logs

+   builddir: /__w/1/s

+   # Provision script: setup_containers.py requires Python3.6+

+   # Ubuntu-16.04 has Python 3.5.2 on board

+   # https://github.com/actions/virtual-environments/blob/master/images/linux/Ubuntu1604-README.md

+   # Ubuntu-18.04 - 3.6.9

+   # https://github.com/actions/virtual-environments/blob/master/images/linux/Ubuntu1804-README.md

+   VM_IMAGE: 'Ubuntu-18.04'

+   MAX_CONTAINER_ENVS: 5

+   PROJECT_TESTS_ENV_WORKING_DIR: $(Build.Repository.LocalPath)/ipa_envs

+   PROJECT_TESTS_SCRIPTS: 'tests/azure/scripts'

+   PROJECT_TESTS_DOCKERFILES: $(Build.Repository.LocalPath)/tests/azure/Dockerfiles

+   PROJECT_TESTS_REPO_PATH: '/project'

@@ -0,0 +1,13 @@ 

+ variables:

+   IPA_PLATFORM: fedora

+   # the Docker public image to build packages (rpms)

+   DOCKER_BUILD_IMAGE: 'fedora:32'

+ 

+   # the Dockerfile to build Docker image for running IPA tests

+   DOCKER_DOCKERFILE: ${{ format('Dockerfile.build.{0}', variables.IPA_PLATFORM) }}

+ 

+   # the template to install IPA's buildtime dependencies

+   PREPARE_BUILD_TEMPLATE: ${{ format('prepare-build-{0}.yml', variables.IPA_PLATFORM) }}

+ 

+   # the template to build IPA packages (rpms)

+   BUILD_TEMPLATE: ${{ format('build-{0}.yml', variables.IPA_PLATFORM) }}

@@ -0,0 +1,16 @@ 

+ variables:

+   IPA_PLATFORM: fedora

+   # the Docker public image to build packages (rpms)

+   #

+   # replace with 'fedora:rawhide' on fix:

+   # https://bugzilla.redhat.com/show_bug.cgi?id=1869612

+   DOCKER_BUILD_IMAGE: 'registry.fedoraproject.org/fedora:rawhide'

+ 

+   # the Dockerfile to build Docker image for running IPA tests

+   DOCKER_DOCKERFILE: 'Dockerfile.build.rawhide'

+ 

+   # the template to install IPA's buildtime dependencies

+   PREPARE_BUILD_TEMPLATE: ${{ format('prepare-build-{0}.yml', variables.IPA_PLATFORM) }}

+ 

+   # the template to build IPA packages (rpms)

+   BUILD_TEMPLATE: ${{ format('build-{0}.yml', variables.IPA_PLATFORM) }}
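
variables-rawhide.yml mirrors the fedora variant but pins the registry image and the rawhide Dockerfile explicitly until the Bugzilla above is fixed. A pipeline that wants the rawhide build would pull it in instead of the fedora variables, e.g. (illustrative include, not part of the patch):

    variables:
    - template: templates/variables-common.yml
    - template: templates/variables-rawhide.yml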

@@ -0,0 +1,13 @@ 

+ variables:

+   IPA_PLATFORM: fedora

+   # the Docker public image to build packages (rpms)

+   DOCKER_BUILD_IMAGE: 'fedora:32'

+ 

+   # the Dockerfile to build Docker image for running IPA tests

+   DOCKER_DOCKERFILE: ${{ format('Dockerfile.build.{0}', variables.IPA_PLATFORM) }}

+ 

+   # the template to install IPA's buildtime dependencies

+   PREPARE_BUILD_TEMPLATE: ${{ format('prepare-build-{0}.yml', variables.IPA_PLATFORM) }}

+ 

+   # the template to build IPA packages (rpms)

+   BUILD_TEMPLATE: ${{ format('build-{0}.yml', variables.IPA_PLATFORM) }}

Signed-off-by: Alexander Bokovoy abokovoy@redhat.com

rebased onto 36f0773 (3 years ago)
rebased onto 65cf098 (3 years ago)
rebased onto e7faf4f (3 years ago)
rebased onto 3231466 (3 years ago)
rebased onto 6565d07 (3 years ago)
rebased onto 1636afb (3 years ago)
rebased onto f492206 (3 years ago)
rebased onto cf2b73b (3 years ago)
rebased onto 858dc25 (3 years ago)
rebased onto c213754 (3 years ago)
rebased onto 1538ce5 (3 years ago)
rebased onto b64dbe5 (3 years ago)

Pull-Request has been merged by abbra (3 years ago)

Changes Summary (32 files)
+27       file added    tests/azure/Dockerfiles/Dockerfile.build.fedora
+29       file added    tests/azure/Dockerfiles/Dockerfile.build.rawhide
+58       file added    tests/azure/Dockerfiles/docker-compose.yml
+787      file added    tests/azure/Dockerfiles/seccomp.json
+4        file added    tests/azure/azure-pipelines-rawhide.yml
+67 -57   file changed  tests/azure/azure-pipelines.yml
+12       file added    tests/azure/azure_definitions/base.yml
+23       file added    tests/azure/azure_definitions/gating.yml
+114      file added    tests/azure/scripts/azure-run-base-tests.sh
+38       file added    tests/azure/scripts/azure-run-integration-tests.sh
+111      file added    tests/azure/scripts/azure-run-tests.sh
+54       file added    tests/azure/scripts/dump_cores.sh
+49       file added    tests/azure/scripts/generate-matrix.py
+20       file added    tests/azure/scripts/install-debuginfo-fedora.sh
+8        file added    tests/azure/scripts/install-debuginfo.sh
+324      file added    tests/azure/scripts/setup_containers.py
+7        file added    tests/azure/scripts/variables-fedora.sh
+14       file added    tests/azure/scripts/variables.sh
+28       file added    tests/azure/templates/build-fedora.yml
+19       file added    tests/azure/templates/generate-job-variables.yml
+9        file added    tests/azure/templates/generate-matrix.yml
+28       file added    tests/azure/templates/ipa-test-config-template.yaml
+24       file added    tests/azure/templates/prepare-build-fedora.yml
+11       file added    tests/azure/templates/publish-build.yml
+58       file added    tests/azure/templates/run-test.yml
+12       file added    tests/azure/templates/save-test-artifacts.yml
+37       file added    tests/azure/templates/setup-test-environment.yml
+127      file added    tests/azure/templates/test-jobs.yml
+14       file added    tests/azure/templates/variables-common.yml
+13       file added    tests/azure/templates/variables-fedora.yml
+16       file added    tests/azure/templates/variables-rawhide.yml
+13       file added    tests/azure/templates/variables.yml