| |
@@ -6,8 +6,8 @@
|
| |
use Exporter;
|
| |
|
| |
use lockapi;
|
| |
- use testapi;
|
| |
- our @EXPORT = qw/run_with_error_check type_safely type_very_safely desktop_vt boot_to_login_screen console_login console_switch_layout desktop_switch_layout console_loadkeys_us do_bootloader boot_decrypt check_release menu_launch_type repo_setup setup_workaround_repo disable_updates_repos mount_update_image umount_update_image cleanup_workaround_repo console_initial_setup handle_welcome_screen gnome_initial_setup anaconda_create_user check_desktop download_modularity_tests quit_firefox advisory_get_installed_packages advisory_check_nonmatching_packages start_with_launcher quit_with_shortcut disable_firefox_studies select_rescue_mode copy_devcdrom_as_isofile get_release_number check_left_bar check_top_bar check_prerelease check_version spell_version_number _assert_and_click is_branched rec_log repos_mirrorlist register_application get_registered_applications solidify_wallpaper check_and_install_git download_testdata make_serial_writable set_update_notification_timestamp/;
|
| |
+ use testapi qw(is_serial_terminal :DEFAULT);
|
| |
+ our @EXPORT = qw/run_with_error_check type_safely type_very_safely desktop_vt boot_to_login_screen console_login console_switch_layout desktop_switch_layout console_loadkeys_us do_bootloader boot_decrypt check_release menu_launch_type setup_repos repo_setup get_workarounds disable_updates_repos cleanup_workaround_repo console_initial_setup handle_welcome_screen gnome_initial_setup anaconda_create_user check_desktop download_modularity_tests quit_firefox advisory_get_installed_packages advisory_check_nonmatching_packages start_with_launcher quit_with_shortcut disable_firefox_studies select_rescue_mode copy_devcdrom_as_isofile get_release_number check_left_bar check_top_bar check_prerelease check_version spell_version_number _assert_and_click is_branched rec_log repos_mirrorlist register_application get_registered_applications solidify_wallpaper check_and_install_git download_testdata make_serial_writable set_update_notification_timestamp/;
|
| |
|
| |
|
| |
# We introduce this global variable to hold the list of applications that have
|
| |
@@ -163,7 +163,7 @@
|
| |
# and let us simplify the process.
|
| |
# We will check if we are logged in, and if so, we will log out to
|
| |
# enable a new proper login based on the user variable.
|
| |
- if (get_var("SERIAL_CONSOLE")) {
|
| |
+ if (get_var("SERIAL_CONSOLE") || is_serial_terminal()) {
|
| |
# Check for the usual prompt.
|
| |
if (wait_serial("~\][#\$]", timeout => 5, quiet => 1)) {
|
| |
type_string "logout\n";
|
| |
@@ -173,7 +173,7 @@
|
| |
# Do the new login.
|
| |
type_string $args{user};
|
| |
type_string "\n";
|
| |
- sleep 2;
|
| |
+ wait_serial("Password:", timeout => 2, quiet => 1);
|
| |
type_string $args{password};
|
| |
type_string "\n";
|
| |
# Let's perform a simple login test. This is the same as
|
| |
@@ -285,7 +285,7 @@
|
| |
while ($xout =~ /tty(\d)/g) {
|
| |
$tty = $1; # most recent match is probably best
|
| |
}
|
| |
- send_key "ctrl-alt-f${tty}";
|
| |
+ select_console "tty${tty}-console";
|
| |
# work around https://gitlab.gnome.org/GNOME/gnome-software/issues/582
|
| |
# if it happens. As of 2019-05, seeing something similar on KDE too
|
| |
my $desktop = get_var('DESKTOP');
|
| |
@@ -302,10 +302,10 @@
|
| |
click_lastmatch if ($desktop eq 'kde');
|
| |
if (match_has_tag "auth_required_fprint") {
|
| |
my $user = get_var("USER_LOGIN", "test");
|
| |
- send_key "ctrl-alt-f6";
|
| |
+ select_console "tty6-console";
|
| |
console_login;
|
| |
assert_script_run "echo SCAN ${user}-finger-1 | socat STDIN UNIX-CONNECT:/run/fprintd-virt";
|
| |
- send_key "ctrl-alt-f${tty}";
|
| |
+ select_console "tty${tty}-console";
|
| |
}
|
| |
elsif (match_has_tag "auth_required_locked") {
|
| |
# When console operation takes a long time, the screen locks
|
| |
@@ -463,58 +463,31 @@
|
| |
assert_script_run "sed -i -e 's,metalink,mirrorlist,g' ${files}";
|
| |
}
|
| |
|
| |
- sub mount_update_image {
|
| |
- # mount the update and workarounds images (whichever are attached)
|
| |
- if (get_var("ISO_2") && script_run "grep updateiso /proc/mounts") {
|
| |
- script_run "mkdir -p /mnt/updateiso";
|
| |
- my $devnode = "/dev/sr0";
|
| |
- $devnode = "/dev/sr1" if (get_var("ISO") || get_var("ISO_1"));
|
| |
- assert_script_run 'echo "' . $devnode . ' /mnt/updateiso iso9660 loop 0 0" >> /etc/fstab';
|
| |
- assert_script_run "mount /mnt/updateiso";
|
| |
- }
|
| |
- if (get_var("ISO_3") && script_run "grep workaroundsiso /proc/mounts") {
|
| |
- script_run "mkdir -p /mnt/workaroundsiso";
|
| |
- my $devnum = 0;
|
| |
- $devnum++ if (get_var("ISO") || get_var("ISO_1"));
|
| |
- $devnum++ if (get_var("ISO_2"));
|
| |
- my $devnode = "/dev/sr${devnum}";
|
| |
- assert_script_run 'echo "' . $devnode . ' /mnt/workaroundsiso iso9660 loop 0 0" >> /etc/fstab';
|
| |
- assert_script_run "mount /mnt/workaroundsiso";
|
| |
+ sub get_setup_repos_script {
|
| |
+ # ensure the 'setup_repos.py' downloader script is present
|
| |
+ if (script_run "ls /usr/local/bin/setup_repos.py") {
|
| |
+ assert_script_run 'curl --retry-delay 10 --max-time 30 --retry 5 -o /usr/local/bin/setup_repos.py https://pagure.io/fedora-qa/os-autoinst-distri-fedora/raw/concdl/f/setup_repos.py', timeout => 180;
|
| |
+ assert_script_run 'chmod ugo+x /usr/local/bin/setup_repos.py';
|
| |
}
|
| |
}
|
| |
|
| |
- sub umount_update_image {
|
| |
- # inverse of mount_update_image
|
| |
- assert_script_run "sed -i '/updateiso/d' /etc/fstab" if (get_var("ISO_2"));
|
| |
- assert_script_run "sed -i '/workaroundsiso/d' /etc/fstab" if (get_var("ISO_3"));
|
| |
- assert_script_run "umount /mnt/updateiso" unless (!get_var("ISO_2") || script_run "grep updateiso /proc/mounts");
|
| |
- assert_script_run "umount /mnt/workaroundsiso" unless (!get_var("ISO_3") || script_run "grep workaroundsiso /proc/mounts");
|
| |
+ sub get_workarounds {
|
| |
+ my $version = shift || get_var("VERSION");
|
| |
+ my %workarounds = (
|
| |
+ "38" => [],
|
| |
+ "39" => [],
|
| |
+ "40" => [],
|
| |
+ );
|
| |
+ my $advortasks = $workarounds{$version};
|
| |
+ return @$advortasks;
|
| |
}
|
| |
|
| |
sub cleanup_workaround_repo {
|
| |
# clean up the workaround repo (see next).
|
| |
+ script_run "rm -rf /mnt/workarounds_repo";
|
| |
script_run "rm -f /etc/yum.repos.d/workarounds.repo";
|
| |
}
|
| |
|
| |
- sub setup_workaround_repo {
|
| |
- # we periodically need to pull an update from updates-testing in
|
| |
- # to fix some bug or other. so, here's an organized way to do it.
|
| |
- # the code that builds the image, and the workaround lists, are
|
| |
- # in fedora_openqa schedule.py. If there are no workarounds, we
|
| |
- # don't get an ISO
|
| |
- return unless (get_var("ISO_3"));
|
| |
- my $version = shift || get_var("VERSION");
|
| |
- cleanup_workaround_repo;
|
| |
- mount_update_image;
|
| |
- # write a repo config file, unless this is the support_server test
|
| |
- # and it is running on a different release than the update is for
|
| |
- # (in this case we need the repo to exist but do not want to use
|
| |
- # it on the actual support_server system)
|
| |
- unless (get_var("TEST") eq "support_server" && $version ne get_var("CURRREL")) {
|
| |
- assert_script_run 'printf "[workarounds]\nname=Workarounds repo\nbaseurl=file:///mnt/workaroundsiso/workarounds_repo\nenabled=1\nmetadata_expire=1\ngpgcheck=0" > /etc/yum.repos.d/workarounds.repo';
|
| |
- }
|
| |
- }
|
| |
-
|
| |
sub disable_updates_repos {
|
| |
# disable updates-testing, or both updates-testing and updates.
|
| |
# factors out similar code in a few different places.
|
| |
@@ -556,13 +529,105 @@
|
| |
# }
|
| |
}
|
| |
|
| |
+ sub _prepare_update_mount {
|
| |
+ # create and mount the filesystem where we will store update/task packages
|
| |
+ # this is separate from setup_repos as it has to happen before we
|
| |
+ # enter the toolbox container on the CANNED workflow
|
| |
+ assert_script_run "mkdir -p /mnt/update_repo";
|
| |
+ # if NUMDISKS is above 1, assume we want to put the update repo on
|
| |
+ # the second disk (to avoid huge updates exhausting space on the main
|
| |
+ # disk)
|
| |
+ if (get_var("NUMDISKS") > 1) {
|
| |
+ # I think the disk will always be vdb. This creates a single large
|
| |
+ # partition.
|
| |
+ assert_script_run "echo 'type=83' | sfdisk /dev/vdb";
|
| |
+ assert_script_run "mkfs.ext4 /dev/vdb1";
|
| |
+ assert_script_run "echo '/dev/vdb1 /mnt/update_repo ext4 defaults 1 2' >> /etc/fstab";
|
| |
+ assert_script_run "mount /mnt/update_repo";
|
| |
+ }
|
| |
+ assert_script_run "cd /mnt/update_repo";
|
| |
+ }
|
| |
+
|
| |
+ sub setup_repos {
|
| |
+ # setup workarounds (if necessary) and updates or tag repositories,
|
| |
+ # using the setup_repos.py script. It's necessary to set up repos
|
| |
+ # (rather than just downloading the RPMs and doing a one-time update)
|
| |
+ # for various reasons: to ensure later package operations use the
|
| |
+ # update packages, and for use when creating deliverables in the
|
| |
+ # tests that do that. Has a 'workarounds only' mode for
|
| |
+ # upgrade_preinstall to use (in case we need workarounds for the
|
| |
+ # pre-upgrade environment)
|
| |
+ my %args = (
|
| |
+ # workarounds only
|
| |
+ waonly => 0,
|
| |
+ # release to get workarounds for
|
| |
+ version => get_var("VERSION"),
|
| |
+ # whether to write repo configs
|
| |
+ configs => 1,
|
| |
+ @_
|
| |
+ );
|
| |
+ my $arch = get_var("ARCH");
|
| |
+ my $tag = get_var("TAG");
|
| |
+ # write the tag repo config if appropriate
|
| |
+ assert_script_run 'printf "[openqa-testtag]\nname=openqa-testtag\nbaseurl=https://kojipkgs.fedoraproject.org/repos/' . "$tag/latest/$arch" . '/\ncost=2000\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/openqa-testtag.repo' if ($tag && !$args{waonly});
|
| |
+ my @was = get_workarounds($args{version});
|
| |
+ # bail if there are no workarounds:
|
| |
+ # * if we're in workarounds-only mode
|
| |
+ # * if we're testing a side tag (so no packages to dl)
|
| |
+ if ($args{waonly} || $tag) {
|
| |
+ return unless (@was);
|
| |
+ }
|
| |
+ # if we got this far, we're definitely downloading *something*
|
| |
+ script_run "dnf -y install createrepo_c bodhi-client koji", 300;
|
| |
+ get_setup_repos_script;
|
| |
+ my $wastring = join(',', @was);
|
| |
+ my $udstring;
|
| |
+ # work out the list of update/task NVRs to test
|
| |
+ if (get_var("ADVISORY_NVRS") || get_var("ADVISORY_NVRS_1")) {
|
| |
+ # regular update case
|
| |
+ # old style single ADVISORY_NVRS var
|
| |
+ my @nvrs = split(/ /, get_var("ADVISORY_NVRS"));
|
| |
+ unless (@nvrs) {
|
| |
+ # new style chunked ADVISORY_NVRS_N vars
|
| |
+ my $count = 1;
|
| |
+ while ($count) {
|
| |
+ if (get_var("ADVISORY_NVRS_$count")) {
|
| |
+ push @nvrs, split(/ /, get_var("ADVISORY_NVRS_$count"));
|
| |
+ $count++;
|
| |
+ }
|
| |
+ else {
|
| |
+ $count = 0;
|
| |
+ }
|
| |
+ }
|
| |
+ }
|
| |
+ $udstring = join(',', @nvrs);
|
| |
+ }
|
| |
+ elsif (get_var("KOJITASK")) {
|
| |
+ # Koji task case (KOJITASK will be set). If multiple tasks,
|
| |
+ # they're concatenated with underscores, switch to commas
|
| |
+ $udstring =~ s/_/,/;
|
| |
+ }
|
| |
+ else {
|
| |
+ die "Neither ADVISORY_NVRS nor KOJITASK set! Don't know what to do";
|
| |
+ }
|
| |
+ my $cmd = "/usr/local/bin/setup_repos.py";
|
| |
+ # don't download updates if we're in workarounds-only mode or testing a tag
|
| |
+ $cmd .= " -u $udstring" unless ($args{waonly} || $tag);
|
| |
+ $cmd .= " -w $wastring" if (@was);
|
| |
+ # write repo config files if asked
|
| |
+ $cmd .= " -c" if ($args{configs});
|
| |
+ $cmd .= " $arch";
|
| |
+ assert_script_run $cmd, 600;
|
| |
+ unless ($args{waonly} || $tag) {
|
| |
+ upload_logs "/mnt/updatepkgnames.txt";
|
| |
+ upload_logs "/mnt/updatepkgs.txt";
|
| |
+ }
|
| |
+ }
|
| |
+
|
| |
sub _repo_setup_updates {
|
| |
# Appropriate repo setup steps for testing a Bodhi update
|
| |
- # sanity check
|
| |
- die "_repo_setup_updates called, but ISO_2 is not attached!" unless (get_var("ISO_2") || get_var("TAG"));
|
| |
- mount_update_image if (get_var("ISO_2"));
|
| |
# Check if we already ran, bail if so
|
| |
- return unless script_run "test -f /root/.oqarsurun";
|
| |
+ return unless script_run "test -f /mnt/updatepkgs.txt";
|
| |
my $version = get_var("VERSION");
|
| |
my $currrel = get_var("CURRREL", "0");
|
| |
my $arch = get_var("ARCH");
|
| |
@@ -571,65 +636,77 @@
|
| |
# unless (script_run 'pushd /etc/yum.repos.d && tar czvf yumreposd.tar.gz * && popd') {
|
| |
# upload_logs "/etc/yum.repos.d/yumreposd.tar.gz";
|
| |
# }
|
| |
+ # if no current console is registered, assume we're on tty1
|
| |
+ my $currcon = current_console || "tty1-console";
|
| |
+ # do all this setup from a serial console for speed (especially when
|
| |
+ # downloading large updates)
|
| |
+ # the console we register as 'virtio-console' is the first virtio
|
| |
+ # serial console, 'virtio_console' on the qemu command line.
|
| |
+ # on most platforms, this console is /dev/hvc0 (and the default
|
| |
+ # qemu serial console, which for openQA is backed by a ringbuf
|
| |
+ # device and logged as serial0.txt, is /dev/ttyS0). however, on
|
| |
+ # Power, the default serial console is /dev/hvc0 and the first
|
| |
+ # virtio serial console is /dev/hvc1.
|
| |
+ # it seems we get a getty on ttyS0 and hvc0 by default, but we
|
| |
+ # don't get one on hvc1. so on Power, start a tty on hvc1
|
| |
+ assert_script_run 'systemctl start serial-getty@hvc1.service' if (get_var("OFW"));
|
| |
+ script_run "echo 'Package download and repo creation happening on serial console...'";
|
| |
+ select_console("virtio-console");
|
| |
+ console_login();
|
| |
+ # prepare the directory the packages will be downloaded to, unless we're
|
| |
+ # testing a side tag
|
| |
+ _prepare_update_mount() unless ($tag);
|
| |
|
| |
- # Set up an additional repo containing the update or task packages. We do
|
| |
- # this rather than simply running a one-time update because it may be the
|
| |
- # case that a package from the update isn't installed *now* but will be
|
| |
- # installed by one of the tests; by setting up a repo containing the
|
| |
- # update and enabling it here, we ensure all later 'dnf install' calls
|
| |
- # will get the packages from the update.
|
| |
# on CANNED, we need to enter the toolbox at this point
|
| |
if (get_var("CANNED")) {
|
| |
type_string "toolbox -y enter\n";
|
| |
- # look for the little purple dot
|
| |
- assert_screen "console_in_toolbox", 180;
|
| |
+ # this is simply to wait till we're in the toolbox
|
| |
+ assert_script_run "true", 180;
|
| |
}
|
| |
+
|
| |
# use mirrorlist not metalink in repo configs
|
| |
repos_mirrorlist();
|
| |
# Disable updates-testing so other bad updates don't break us
|
| |
disable_updates_repos(both => 0) if ($version > $currrel);
|
| |
# use the buildroot repo on Rawhide: see e.g.
|
| |
# https://pagure.io/fedora-ci/general/issue/376 for why
|
| |
- if ($version eq get_var("RAWREL") && get_var("TEST") ne "support_server") {
|
| |
- assert_script_run 'printf "[koji-rawhide]\nname=koji-rawhide\nbaseurl=https://kojipkgs.fedoraproject.org/repos/f' . $version . '-build/latest/' . $arch . '/\ncost=2000\nenabled=1\nmetadata_expire=30\ngpgcheck=0\nskip_if_unavailable=1\n" > /etc/yum.repos.d/koji-rawhide.repo';
|
| |
- }
|
| |
- # set up the workaround repo
|
| |
- setup_workaround_repo;
|
| |
- upload_logs "/mnt/updateiso/updatepkgnames.txt" unless ($tag);
|
| |
- upload_logs "/mnt/updateiso/updatepkgs.txt" unless ($tag);
|
| |
- # write a repo config file, unless this is the support_server test
|
| |
- # and it is running on a different release than the update is for
|
| |
- # (in this case we need the repo to exist but do not want to use
|
| |
- # it on the actual support_server system)
|
| |
- unless (get_var("TEST") eq "support_server" && $version ne get_var("CURRREL")) {
|
| |
- if ($tag) {
|
| |
- assert_script_run 'printf "[openqa-testtag]\nname=openqa-testtag\nbaseurl=https://kojipkgs.fedoraproject.org/repos/' . "$tag/latest/$arch" . '/\ncost=2000\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/openqa-testtag.repo';
|
| |
- }
|
| |
- else {
|
| |
- assert_script_run 'printf "[advisory]\nname=Advisory repo\nbaseurl=file:///mnt/updateiso/update_repo\nenabled=1\nmetadata_expire=3600\ngpgcheck=0" > /etc/yum.repos.d/advisory.repo';
|
| |
- }
|
| |
+ if (get_var("VERSION") eq get_var("RAWREL") && get_var("TEST") ne "support_server") {
|
| |
+ assert_script_run 'printf "[koji-rawhide]\nname=koji-rawhide\nbaseurl=https://kojipkgs.fedoraproject.org/repos/rawhide/latest/' . $arch . '/\ncost=2000\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/koji-rawhide.repo';
|
| |
+ }
|
| |
+ if (get_var("CANNED")) {
|
| |
+ # install and use en_US.UTF-8 locale for consistent sort
|
| |
+ # ordering
|
| |
+ assert_script_run "dnf -y install glibc-langpack-en", 300;
|
| |
+ assert_script_run "export LC_ALL=en_US.UTF-8";
|
| |
+ }
|
| |
+ # set up workarounds and updates repos (if needed)
|
| |
+ if (get_var("TEST") eq "support_server" && $version ne get_var("CURRREL")) {
|
| |
+ # don't write repo configs if this is the support_server test
|
| |
+ # and it is running on a different release than the update is for
|
| |
+ # (in this case we need the repo to exist but do not want to use
|
| |
+ # it on the actual support_server system)
|
| |
+ setup_repos(configs => 0);
|
| |
+ }
|
| |
+ else {
|
| |
+ setup_repos(configs => 1);
|
| |
# run an update now, except for upgrade or install tests,
|
| |
# where the updated packages should have been installed
|
| |
# already and we want to fail if they weren't, or CANNED
|
| |
# tests, there's no point updating the toolbox
|
| |
script_run "dnf -y update", 1200 unless (get_var("UPGRADE") || get_var("INSTALL") || get_var("CANNED"));
|
| |
- # work around update removing 'dnf' command for the dnf5
|
| |
- # revert: https://bodhi.fedoraproject.org/updates/FEDORA-2023-5fd964c1bf#comment-3149533
|
| |
- if (get_var("ADVISORY_OR_TASK") eq "FEDORA-2023-5fd964c1bf") {
|
| |
- script_run "dnf5 -y --best install 'dnf < 5'", 300 unless (get_var("UPGRADE") || get_var("INSTALL") || get_var("CANNED"));
|
| |
- }
|
| |
# on liveinst tests, we'll remove the packages we installed
|
| |
# above (and their deps, which dnf will include automatically),
|
| |
# just in case they're in the update under test; otherwise we
|
| |
# get a bogus failure for the package not being updated
|
| |
- script_run "dnf -y remove bodhi-client createrepo koji", 600 if (get_var("INSTALL") && !get_var("CANNED"));
|
| |
+ script_run "dnf -y remove bodhi-client createrepo_c koji", 600 if (get_var("INSTALL") && !get_var("CANNED"));
|
| |
}
|
| |
# exit the toolbox on CANNED
|
| |
if (get_var("CANNED")) {
|
| |
type_string "exit\n";
|
| |
- wait_still_screen 5;
|
| |
+ wait_serial "# ";
|
| |
}
|
| |
- assert_script_run "touch /root/.oqarsurun";
|
| |
+ # flip back to whatever console we were on before
|
| |
+ select_console $currcon;
|
| |
}
|
| |
|
| |
sub repo_setup {
|
| |
@@ -1009,9 +1086,9 @@
|
| |
my ($whitelist) = @_;
|
| |
# we need python3-yaml for the script to run
|
| |
assert_script_run 'dnf -y install python3-yaml', 180;
|
| |
- assert_script_run 'curl --verbose --retry-delay 10 --max-time 30 --retry 5 -o /root/test.py https://pagure.io/fedora-qa/modularity_testing_scripts/raw/master/f/modular_functions.py', timeout => 180;
|
| |
+ assert_script_run 'curl --retry-delay 10 --max-time 30 --retry 5 -o /root/test.py https://pagure.io/fedora-qa/modularity_testing_scripts/raw/master/f/modular_functions.py', timeout => 180;
|
| |
if ($whitelist eq 'whitelist') {
|
| |
- assert_script_run 'curl --verbose --retry-delay 10 --max-time 30 --retry 5 -o /root/whitelist https://pagure.io/fedora-qa/modularity_testing_scripts/raw/master/f/whitelist', timeout => 180;
|
| |
+ assert_script_run 'curl --retry-delay 10 --max-time 30 --retry 5 -o /root/whitelist https://pagure.io/fedora-qa/modularity_testing_scripts/raw/master/f/whitelist', timeout => 180;
|
| |
}
|
| |
assert_script_run 'chmod 755 /root/test.py';
|
| |
}
|
| |
@@ -1111,21 +1188,18 @@
|
| |
sub advisory_get_installed_packages {
|
| |
# can't do anything useful when testing a side tag
|
| |
return if (get_var("TAG"));
|
| |
- # sanity check
|
| |
- die "advisory_get_installed_packages, but ISO_2 is not attached!" unless (get_var("ISO_2"));
|
| |
- mount_update_image;
|
| |
# bail out if the file doesn't exist: this is in case we get
|
| |
# here in the post-fail hook but we failed before creating it
|
| |
- return if script_run "test -f /mnt/updateiso/updatepkgs.txt";
|
| |
+ return if script_run "test -f /mnt/updatepkgs.txt";
|
| |
assert_script_run 'rpm -qa --qf "%{SOURCERPM} %{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE}\n" | sort -u > /tmp/allpkgs.txt', timeout => 90;
|
| |
# this finds lines which appear in both files
|
| |
# http://www.unix.com/unix-for-dummies-questions-and-answers/34549-find-matching-lines-between-2-files.html
|
| |
- if (script_run 'comm -12 /tmp/allpkgs.txt /mnt/updateiso/updatepkgs.txt > /mnt/testedpkgs.txt') {
|
| |
+ if (script_run 'comm -12 /tmp/allpkgs.txt /mnt/updatepkgs.txt > /mnt/testedpkgs.txt') {
|
| |
# occasionally, for some reason, it's unhappy about sorting;
|
| |
# we shouldn't fail the test in this case, just upload the
|
| |
# files so we can see why...
|
| |
upload_logs "/tmp/allpkgs.txt", failok => 1;
|
| |
- upload_logs "/mnt/updateiso/updatepkgs.txt", failok => 1;
|
| |
+ upload_logs "/mnt/updatepkgs.txt", failok => 1;
|
| |
}
|
| |
# we'll try and upload the output even if comm 'failed', as it
|
| |
# does in fact still write it in some cases
|
| |
@@ -1144,19 +1218,16 @@
|
| |
);
|
| |
# can't do anything useful when testing a side tag
|
| |
return if (get_var("TAG"));
|
| |
- # sanity check
|
| |
- die "advisory_check_nonmatching_packages called, but ISO_2 is not attached!" unless (get_var("ISO_2"));
|
| |
- mount_update_image;
|
| |
# bail out if the file doesn't exist: this is in case we get
|
| |
# here in the post-fail hook but we failed before creating it
|
| |
- return if script_run "test -f /mnt/updateiso/updatepkgnames.txt";
|
| |
+ return if script_run "test -f /mnt/updatepkgnames.txt";
|
| |
# if this fails in advisory_post, we don't want to do it *again*
|
| |
# unnecessarily in post_fail_hook
|
| |
return if (get_var("_ACNMP_DONE"));
|
| |
script_run 'touch /tmp/installedupdatepkgs.txt';
|
| |
# this creates /tmp/installedupdatepkgs.txt as a sorted list of installed
|
| |
# packages with the same name as packages from the update, in the same form
|
| |
- # as /mnt/updateiso/updatepkgs.txt. The '--last | head -1' tries to handle the
|
| |
+ # as /mnt/updatepkgs.txt. The '--last | head -1' tries to handle the
|
| |
# problem of installonly packages like the kernel, where we wind up with
|
| |
# *multiple* versions installed after the update; the first line of output
|
| |
# for any given package with --last is the most recent version, i.e. the
|
| |
@@ -1167,15 +1238,15 @@
|
| |
# (we need four to reach bash, and half of them get eaten by perl or
|
| |
# something along the way). Yes, it only works with *single* quotes. Yes,
|
| |
# I hate escaping
|
| |
- script_run 'for pkg in $(cat /mnt/updateiso/updatepkgnames.txt); do rpm -q $pkg && rpm -q $pkg --last | head -1 | cut -d" " -f1 | sed -e \'s,\^,\\\\\\\\^,g\' | xargs rpm -q --qf "%{SOURCERPM} %{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE}\n" >> /tmp/installedupdatepkgs.txt; done', timeout => 180;
|
| |
+ script_run 'for pkg in $(cat /mnt/updatepkgnames.txt); do rpm -q $pkg && rpm -q $pkg --last | head -1 | cut -d" " -f1 | sed -e \'s,\^,\\\\\\\\^,g\' | xargs rpm -q --qf "%{SOURCERPM} %{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE}\n" >> /tmp/installedupdatepkgs.txt; done', timeout => 180;
|
| |
script_run 'sort -u -o /tmp/installedupdatepkgs.txt /tmp/installedupdatepkgs.txt';
|
| |
# for debugging, may as well always upload these, can't hurt anything
|
| |
upload_logs "/tmp/installedupdatepkgs.txt", failok => 1;
|
| |
- upload_logs "/mnt/updateiso/updatepkgs.txt", failok => 1;
|
| |
+ upload_logs "/mnt/updatepkgs.txt", failok => 1;
|
| |
# download the check script and run it
|
| |
- assert_script_run 'curl --verbose --retry-delay 10 --max-time 30 --retry 5 -o updvercheck.py https://pagure.io/fedora-qa/os-autoinst-distri-fedora/raw/main/f/updvercheck.py', timeout => 180;
|
| |
+ assert_script_run 'curl --retry-delay 10 --max-time 30 --retry 5 -o updvercheck.py https://pagure.io/fedora-qa/os-autoinst-distri-fedora/raw/main/f/updvercheck.py', timeout => 180;
|
| |
my $advisory = get_var("ADVISORY");
|
| |
- my $cmd = 'python3 ./updvercheck.py /mnt/updateiso/updatepkgs.txt /tmp/installedupdatepkgs.txt';
|
| |
+ my $cmd = 'python3 ./updvercheck.py /mnt/updatepkgs.txt /tmp/installedupdatepkgs.txt';
|
| |
$cmd .= " $advisory" if ($advisory);
|
| |
my $ret = script_run $cmd;
|
| |
# 2 is warnings only, 3 is problems, 1 means the script died in
|
| |
@@ -1560,7 +1631,7 @@
|
| |
assert_script_run("mkdir temp");
|
| |
assert_script_run("cd temp");
|
| |
# Download the compressed file with the repository content.
|
| |
- assert_script_run("curl --verbose --retry-delay 10 --max-time 120 --retry 5 -o repository.tar.gz https://pagure.io/fedora-qa/openqa_testdata/blob/thetree/f/repository.tar.gz", timeout => 600);
|
| |
+ assert_script_run("curl --retry-delay 10 --max-time 120 --retry 5 -o repository.tar.gz https://pagure.io/fedora-qa/openqa_testdata/blob/thetree/f/repository.tar.gz", timeout => 600);
|
| |
# Untar it.
|
| |
assert_script_run("tar -zxvf repository.tar.gz");
|
| |
# Copy out the files into the VMs directory structure.
|
| |
Last year, we landed https://pagure.io/fedora-qa/fedora_openqa/c/d4ad4d9426b7ce3a864461078013b24974bc5320?branch=main to have the scheduler download the update/task and workaround packages for update tests, and inject them into the test as ISO images. The intent was to make update tests faster, particularly tests of large multi-package updates.
It did achieve that, but had several drawbacks, including two big ones: restarting tests from the web UI would fail once they were a few hours or days old (because the ISOs got garbage collected), and you could not schedule tests from outside the openQA cluster (it had to be done from a system with the openQA asset NFS share mounted). These are bad enough that I don't think the idea was a good one, in retrospect.
This PR first reverts to more or less the old way of doing things (but adjusted to maintain support for the later-added features of testing multiple tasks at once, and testing side tags) - the tests download the packages in-line, one at a time. Then it replaces that with an approach where the tests use a Python script that downloads the packages in concurrent batches of up to 20 and creates the repo config files, and then it switches to running the entire
_repo_setup_updates
process at a serial console instead of a normal console. This aims to speed the process up significantly. Testing on a large multi-package update - 183 packages - the advisory_update step with the older approach takes 25m 42s. With this newer approach it takes 4m 45s. (The time for the 'scheduler download' approach was 4m 15s, so this gets pretty close to that). On a single-package update I got 6m 25s for the old way and 4m 49s for this way (though the times can vary somewhat, as most of the time is spent on dnf operations and those can depend on how loaded down the host is).