| |
@@ -7,7 +7,7 @@
|
| |
import shutil
|
| |
import shlex
|
| |
import signal
|
| |
- import multiprocessing
|
| |
+ import StringIO
|
| |
import random
|
| |
import socket
|
| |
import subprocess
|
| |
@@ -113,6 +113,23 @@
|
| |
"_meta": {"hostvars": variables}}
|
| |
|
| |
|
| |
def get_qemu_smp_arg():
    """Determine the number of CPUs that should be visible in the guest.

    See e.g. https://www.redhat.com/archives/libvirt-users/2017-June/msg00025.html
    We want to match the number of host physical cores, so the guest
    kernel makes sane scheduling decisions.
    """
    # We may be run in a cgroup with fewer cores available than physical.
    usable_cpus = int(subprocess.check_output(['nproc']).strip())
    # Count unique (core, socket) pairs = physical cores, ignoring SMT threads.
    # https://stackoverflow.com/questions/6481005/how-to-obtain-the-number-of-cpus-cores-in-linux-from-the-command-line
    topology = subprocess.check_output(['lscpu', '-b', '-p=Core,Socket'])
    physical_cores = {ln.strip()
                      for ln in StringIO.StringIO(topology)
                      if not ln.startswith('#')}
    # Never expose more vCPUs than the cgroup actually lets us use.
    n = min(usable_cpus, len(physical_cores))
    return '{},sockets={},cores=1,threads=1'.format(n, n)
|
| |
+
|
| |
+
|
| |
def write_debug_inventory(file_, host_vars):
|
| |
raw_inventory = {"all": {"children": {"localhost": {"hosts": host_vars},
|
| |
"subjects": {"hosts": host_vars},
|
| |
@@ -149,7 +166,7 @@
|
| |
# Use -cpu host and -smp by default.
|
| |
# virtio-rng-pci: https://wiki.qemu.org/Features/VirtIORNG
|
| |
qemu_cmd = ["/usr/bin/qemu-system-x86_64",
|
| |
- "-cpu", "host", "-smp", "{}".format(multiprocessing.cpu_count()),
|
| |
+ "-cpu", "host", "-smp", get_qemu_smp_arg(),
|
| |
"-m", "1024", image, "-enable-kvm", "-snapshot", "-cdrom", cloudinit,
|
| |
"-net", "nic,model=virtio", "-net", "user,hostfwd=tcp:127.0.0.3:{0}-:22".format(port),
|
| |
"-device", "virtio-rng-pci", "-rtc", "base=utc",
|
| |
I was investigating some rpm-ostree CI performance issues in
https://github.com/projectatomic/rpm-ostree/pull/1362
and I discovered that my previous change to use
-smp $cpus
was suboptimal. See the linked libvirt discussion; basically we
really want to use physical cores, otherwise the guest kernel
is going to make bad scheduling decisions.
We also take care to minimize that set by the number of
CPUs available to us; we may be running in a container with
a cgroup for example.