author		Sean Christopherson <seanjc@google.com>	2025-09-19 14:46:46 -0700
committer	Sean Christopherson <seanjc@google.com>	2025-09-23 08:38:59 -0700
commit		1fcd3053aa1a6d4c417450c92ddb960b9edb1bf1 (patch)
tree		7de289c4ca11d44fa507cc8d7b14bff5f87c0623 /tools
parent		KVM: selftests: Track unavailable_mask for PMU events as 32-bit value (diff)
KVM: selftests: Reduce number of "unavailable PMU events" combos tested
Reduce the number of combinations of unavailable PMU events masks that are
tested by the PMU counters test. In reality, testing every possible
combination isn't all that interesting, and certainly not worth the tens of
seconds (or worse, minutes) of runtime. Fully testing the 2^N space will be
especially problematic in the near future, as 5 (!) new arch events are on
their way.

Use alternating bit patterns (and 0 and -1u) in the hopes that _if_ there is
ever a KVM bug, it's not something horribly convoluted that shows up only
with a super specific pattern/value.

Reported-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20250919214648.1585683-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
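For illustration only (not part of the patch): a minimal standalone sketch of
why a handful of alternating patterns is enough to flush out a wrong-bit check
without walking the full 2^N space. Assuming NR_INTEL_ARCH_EVENTS is 8, the
old exhaustive loop covered 2^8 - 2 = 254 masks per PMU version/length pair
(and would cover 2^13 - 2 = 8190 once 5 more events land), whereas the reduced
table is a constant 10 masks. The event_unavailable() helpers below are
hypothetical stand-ins for the availability check, not KVM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_EVENTS 8	/* assumed stand-in for NR_INTEL_ARCH_EVENTS */

/* Hypothetical "correct" check: event i is unavailable if bit i is set. */
static bool event_unavailable(uint32_t mask, int i)
{
	return mask & (1u << i);
}

/*
 * Hypothetical buggy check that tests the neighboring bit, i.e. the kind
 * of wrong-bit(s) bug the alternating patterns aim to catch.
 */
static bool buggy_event_unavailable(uint32_t mask, int i)
{
	return mask & (1u << (i + 1));
}

int main(void)
{
	/* Same table as the patch: 0, all ones, and alternating patterns. */
	const uint32_t unavailable_masks[] = {
		0x0,         0xffffffffu, 0xaaaaaaaau, 0x55555555u,
		0xf0f0f0f0u, 0x0f0f0f0fu, 0xa0a0a0a0u, 0x0a0a0a0au,
		0x50505050u, 0x05050505u,
	};
	unsigned int k;
	int i;

	/*
	 * Every alternating pattern has adjacent bits that differ, so the
	 * buggy check disagrees with the correct one for some event on some
	 * mask -- no need to enumerate all 2^N masks to expose it.
	 */
	for (k = 0; k < sizeof(unavailable_masks) / sizeof(unavailable_masks[0]); k++) {
		for (i = 0; i < NR_EVENTS; i++) {
			if (event_unavailable(unavailable_masks[k], i) !=
			    buggy_event_unavailable(unavailable_masks[k], i)) {
				printf("mask 0x%08x catches the wrong-bit check at event %d\n",
				       unavailable_masks[k], i);
				return 0;
			}
		}
	}
	printf("reduced mask set missed the bug\n");
	return 1;
}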
Diffstat (limited to 'tools')
-rw-r--r--	tools/testing/selftests/kvm/x86/pmu_counters_test.c | 38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 1ef038c4c73f..c6987a9b65bf 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -578,6 +578,26 @@ static void test_intel_counters(void)
};
/*
+ * To keep the total runtime reasonable, test only a handful of select,
+ * semi-arbitrary values for the mask of unavailable PMU events. Test
+ * 0 (all events available) and all ones (no events available) as well
+ * as alternating bit sequences, e.g. to detect if KVM is checking the
+ * wrong bit(s).
+ */
+ const uint32_t unavailable_masks[] = {
+ 0x0,
+ 0xffffffffu,
+ 0xaaaaaaaau,
+ 0x55555555u,
+ 0xf0f0f0f0u,
+ 0x0f0f0f0fu,
+ 0xa0a0a0a0u,
+ 0x0a0a0a0au,
+ 0x50505050u,
+ 0x05050505u,
+ };
+
+ /*
* Test up to PMU v5, which is the current maximum version defined by
* Intel, i.e. is the last version that is guaranteed to be backwards
* compatible with KVM's existing behavior.
@@ -614,16 +634,7 @@ static void test_intel_counters(void)
pr_info("Testing arch events, PMU version %u, perf_caps = %lx\n",
v, perf_caps[i]);
- /*
- * To keep the total runtime reasonable, test every
- * possible non-zero, non-reserved bitmap combination
- * only with the native PMU version and the full bit
- * vector length.
- */
- if (v == pmu_version) {
- for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++)
- test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k);
- }
+
/*
* Test single bits for all PMU versions and lengths up
* to the number of events +1 (to verify KVM doesn't do
@@ -632,11 +643,8 @@ static void test_intel_counters(void)
* ones i.e. all events being available and unavailable.
*/
for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) {
- test_arch_events(v, perf_caps[i], j, 0);
- test_arch_events(v, perf_caps[i], j, -1u);
-
- for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++)
- test_arch_events(v, perf_caps[i], j, BIT(k));
+ for (k = 0; k < ARRAY_SIZE(unavailable_masks); k++)
+ test_arch_events(v, perf_caps[i], j, unavailable_masks[k]);
}
pr_info("Testing GP counters, PMU version %u, perf_caps = %lx\n",