summaryrefslogtreecommitdiffstats
path: root/tools/testing
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 17:11:22 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2026-04-15 17:11:22 -0700
commit5ed19574ebf0ba857c8a0d3d80ee409ff9498363 (patch)
tree35b32b28869c996dbda3445cca683dfce3a064f7 /tools/testing
parentb7f84966b6f17626a8129723894dc315a076b391 (diff)
parent81fca7087466bd81fff7100d824b2c788edf7a97 (diff)
downloadlinux-5ed19574ebf0ba857c8a0d3d80ee409ff9498363.tar.gz
linux-5ed19574ebf0ba857c8a0d3d80ee409ff9498363.zip
Merge tag 'ktest-v7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest
Pull ktest updates from Steven Rostedt: - Fix undef warning when WARNINGS_FILE is unset The check_buildlog() references WARNINGS_FILE even when it's not set. Perl triggers a warning in this case. Check if the WARNINGS_FILE is defined before checking if the file it represents exists. - Fix how LOG_FILE is resolved LOG_FILE is expanded immediately after the config file is parsed. If LOG_FILE depends on variables from the tests, it will use stale values instead of using the test variables. Have LOG_FILE also resolve test variables. - Treat an undefined self-reference variable as empty Variables can recursively include themselves for appending. Currently, if a variable references itself and it is not defined, it leaves the reference in the definition: "VAR = ${VAR} foo" keeps the ${VAR} around. Have it removed instead. - Fix clearing of variables per test If a variable has a defined default, a test can not clear it by assigning the variable to empty. Fix this by clearing the variable for a test when the test config has that variable assigned to nothing. - Fix run_command() to catch stderr in the shell command parsing Switch to Perl's list-form open with an "sh -c" wrapper to run shell commands, so that the log file catches shell parsing errors. - Fix console output during reboot cycle The POWER_CYCLE callback during reboot() can miss output from the next boot, making ktest miss the boot string it was waiting for. - Add PRE_KTEST_DIE for PRE_KTEST failures If the command for PRE_KTEST fails, ktest does not fail (this was by design, as this command was used to add patches that may or may not apply). Add the PRE_KTEST_DIE value to force ktest to fail if PRE_KTEST fails. - Run POST_KTEST hooks on failure and cancellation PRE_KTEST always runs before a ktest test; have POST_KTEST always run after a test, even if the test fails or is cancelled, to do the teardown of PRE_KTEST. - Add a --dry-run mode Add --dry-run to parse the config, print the results, and exit without running any of the tests.
- Store failures from the dodie() path as well The STORE_FAILURES option saves the logs on failure, but there are failure paths that miss storing them. Perform STORE_FAILURES in dodie() to capture these failures too. * tag 'ktest-v7.1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-ktest: ktest: Store failure logs also in fatal paths ktest: Add a --dry-run mode ktest: Run POST_KTEST hooks on failure and cancellation ktest: Add PRE_KTEST_DIE for PRE_KTEST failures ktest: Stop dropping console output during power-cycle reboot ktest: Run commands through list-form shell open ktest: Honor empty per-test option overrides ktest: Treat undefined self-reference as empty ktest: Resolve LOG_FILE in test option context ktest: Avoid undef warning when WARNINGS_FILE is unset
Diffstat (limited to 'tools/testing')
-rwxr-xr-xtools/testing/ktest/ktest.pl163
-rw-r--r--tools/testing/ktest/sample.conf6
2 files changed, 127 insertions, 42 deletions
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 001c4df9f7df..112f9ca2444b 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -85,6 +85,7 @@ my %default = (
);
my $test_log_start = 0;
+my $dry_run = 0;
my $ktest_config = "ktest.conf";
my $version;
@@ -100,7 +101,9 @@ my $test_type;
my $build_type;
my $build_options;
my $final_post_ktest;
+my $post_ktest_done = 0;
my $pre_ktest;
+my $pre_ktest_die;
my $post_ktest;
my $pre_test;
my $pre_test_die;
@@ -283,6 +286,7 @@ my %option_map = (
"BUILD_DIR" => \$builddir,
"TEST_TYPE" => \$test_type,
"PRE_KTEST" => \$pre_ktest,
+ "PRE_KTEST_DIE" => \$pre_ktest_die,
"POST_KTEST" => \$post_ktest,
"PRE_TEST" => \$pre_test,
"PRE_TEST_DIE" => \$pre_test_die,
@@ -584,7 +588,7 @@ sub end_monitor;
sub wait_for_monitor;
sub _logit {
- if (defined($opt{"LOG_FILE"})) {
+ if (defined($opt{"LOG_FILE"}) && defined(fileno(LOG))) {
print LOG @_;
}
}
@@ -910,6 +914,14 @@ sub set_variable {
if (defined($command_tmp_vars{$lvalue})) {
return;
}
+
+ # If a variable is undefined, treat an unescaped self-reference as empty.
+ if (!defined($variable{$lvalue})) {
+ $rvalue =~ s/(?<!\\)\$\{\Q$lvalue\E\}//g;
+ $rvalue =~ s/^\s+//;
+ $rvalue =~ s/\s+$//;
+ }
+
if ($rvalue =~ /^\s*$/) {
delete $variable{$lvalue};
} else {
@@ -1354,6 +1366,9 @@ sub read_config {
print "$option\n";
}
print "Set IGNORE_UNUSED = 1 to have ktest ignore unused variables\n";
+ if ($dry_run) {
+ return;
+ }
if (!read_yn "Do you want to continue?") {
exit -1;
}
@@ -1491,12 +1506,13 @@ sub reboot {
}
if ($powercycle) {
- run_command "$power_cycle";
-
start_monitor;
- # flush out current monitor
- # May contain the reboot success line
- wait_for_monitor 1;
+ if (defined($time)) {
+ # Flush stale console output from the old kernel before power-cycling.
+ wait_for_monitor 1;
+ }
+
+ run_command "$power_cycle";
} else {
# Make sure everything has been written to disk
@@ -1575,6 +1591,24 @@ sub get_test_name() {
return $name;
}
+sub run_post_ktest {
+ my $cmd;
+
+ return if ($post_ktest_done);
+
+ if (defined($final_post_ktest)) {
+ $cmd = $final_post_ktest;
+ } elsif (defined($post_ktest)) {
+ $cmd = $post_ktest;
+ } else {
+ return;
+ }
+
+ my $cp_post_ktest = eval_kernel_version($cmd);
+ run_command $cp_post_ktest;
+ $post_ktest_done = 1;
+}
+
sub dodie {
# avoid recursion
return if ($in_die);
@@ -1601,6 +1635,11 @@ sub dodie {
print " See $opt{LOG_FILE} for more info.\n";
}
+ # Fatal paths bypass fail(), so STORE_FAILURES needs to be handled here.
+ if (defined($store_failures)) {
+ save_logs("fail", $store_failures);
+ }
+
if ($email_on_error) {
my $name = get_test_name;
my $log_file;
@@ -1634,6 +1673,7 @@ sub dodie {
if (defined($post_test)) {
run_command $post_test;
}
+ run_post_ktest;
die @_, "\n";
}
@@ -1913,7 +1953,10 @@ sub run_command {
doprint("$command ... ");
$start_time = time;
- $pid = open(CMD, "$command 2>&1 |") or
+ $pid = open(CMD, "-|",
+ "sh", "-c",
+ 'command=$1; shift; exec 2>&1; eval "$command"',
+ "sh", $command) or
(fail "unable to exec $command" and return 0);
if (defined($opt{"LOG_FILE"})) {
@@ -2508,7 +2551,7 @@ sub check_buildlog {
my $save_no_reboot = $no_reboot;
$no_reboot = 1;
- if (-f $warnings_file) {
+ if (defined($warnings_file) && -f $warnings_file) {
open(IN, $warnings_file) or
dodie "Error opening $warnings_file";
@@ -4183,7 +4226,8 @@ sub __set_test_option {
my $option = "$name\[$i\]";
- if (option_defined($option)) {
+ if (exists($opt{$option})) {
+ return undef if (!option_defined($option));
return $opt{$option};
}
@@ -4191,7 +4235,8 @@ sub __set_test_option {
if ($i >= $test &&
$i < $test + $repeat_tests{$test}) {
$option = "$name\[$test\]";
- if (option_defined($option)) {
+ if (exists($opt{$option})) {
+ return undef if (!option_defined($option));
return $opt{$option};
}
}
@@ -4213,6 +4258,53 @@ sub set_test_option {
return eval_option($name, $option, $i);
}
+sub print_test_preamble {
+ my ($resolved) = @_;
+
+ doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
+
+ for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+ if (!$i) {
+ doprint "DEFAULT OPTIONS:\n";
+ } else {
+ doprint "\nTEST $i OPTIONS";
+ if (defined($repeat_tests{$i})) {
+ $repeat = $repeat_tests{$i};
+ doprint " ITERATE $repeat";
+ }
+ doprint "\n";
+ }
+
+ foreach my $option (sort keys %opt) {
+ my $value;
+
+ if ($option =~ /\[(\d+)\]$/) {
+ next if ($i != $1);
+
+ if ($resolved) {
+ my $name = $option;
+ $name =~ s/\[\d+\]$//;
+ $value = set_test_option($name, $i);
+ } else {
+ $value = $opt{$option};
+ }
+ } else {
+ next if ($i);
+
+ if ($resolved) {
+ $value = set_test_option($option, 0);
+ } else {
+ $value = $opt{$option};
+ }
+ }
+
+ $value = "" if (!defined($value));
+ doprint "$option = $value\n";
+ }
+ }
+}
+
sub find_mailer {
my ($mailer) = @_;
@@ -4298,6 +4390,7 @@ sub cancel_test {
send_email("KTEST: Your [$name] test was cancelled",
"Your test started at $script_start_time was cancelled: sig int");
}
+ run_post_ktest;
die "\nCaught Sig Int, test interrupted: $!\n"
}
@@ -4311,6 +4404,8 @@ ktest.pl version: $VERSION
Sets global BUILD_NOCLEAN to 1
-D TEST_TYPE[2]=build
Sets TEST_TYPE of test 2 to "build"
+ --dry-run
+ Print resolved test options and exit without running tests.
It can also override all temp variables.
-D USE_TEMP_DIR:=1
@@ -4342,6 +4437,9 @@ while ( $#ARGV >= 0 ) {
} else {
$command_vars[$#command_vars + 1] = $val;
}
+ } elsif ( $ARGV[0] eq "--dry-run" ) {
+ $dry_run = 1;
+ shift;
} elsif ( $ARGV[0] eq "-h" ) {
die_usage;
} else {
@@ -4390,8 +4488,13 @@ EOF
}
read_config $ktest_config;
+if ($dry_run) {
+ print_test_preamble 1;
+ exit 0;
+}
+
if (defined($opt{"LOG_FILE"})) {
- $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1);
+ $opt{"LOG_FILE"} = set_test_option("LOG_FILE", 1);
}
# Append any configs entered in manually to the config file.
@@ -4421,31 +4524,7 @@ if (defined($opt{"LOG_FILE"})) {
LOG->autoflush(1);
}
-doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
-
-for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
-
- if (!$i) {
- doprint "DEFAULT OPTIONS:\n";
- } else {
- doprint "\nTEST $i OPTIONS";
- if (defined($repeat_tests{$i})) {
- $repeat = $repeat_tests{$i};
- doprint " ITERATE $repeat";
- }
- doprint "\n";
- }
-
- foreach my $option (sort keys %opt) {
- if ($option =~ /\[(\d+)\]$/) {
- next if ($i != $1);
- } else {
- next if ($i);
- }
-
- doprint "$option = $opt{$option}\n";
- }
-}
+print_test_preamble 0;
$SIG{INT} = qw(cancel_test);
@@ -4492,7 +4571,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
if ($i == 1) {
if (defined($pre_ktest)) {
doprint "\n";
- run_command $pre_ktest;
+ my $ret = run_command $pre_ktest;
+ if (!$ret && defined($pre_ktest_die) &&
+ $pre_ktest_die) {
+ dodie "failed to pre_ktest\n";
+ }
}
if ($email_when_started) {
my $name = get_test_name;
@@ -4659,11 +4742,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
success $i;
}
-if (defined($final_post_ktest)) {
-
- my $cp_final_post_ktest = eval_kernel_version $final_post_ktest;
- run_command $cp_final_post_ktest;
-}
+run_post_ktest;
if ($opt{"POWEROFF_ON_SUCCESS"}) {
halt;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index 9c4c449a8f3e..b6e439ef511b 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -494,6 +494,12 @@
#
# default (undefined)
#PRE_KTEST = ${SSH} ~/set_up_test
+#
+# To specify if the test should fail if PRE_KTEST fails,
+# PRE_KTEST_DIE needs to be set to 1. Otherwise the PRE_KTEST
+# result is ignored.
+# (default 0)
+#PRE_KTEST_DIE = 1
# If you want to execute some command after all the tests have
# completed, you can set this option. Note, it can be set as a