| author | Sean Christopherson <seanjc@google.com> | 2025-06-10 15:57:34 -0700 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2025-06-20 13:07:36 -0700 |
| commit | 54f1c770611b9f8d7e8f8a50a60384291f143689 | |
| tree | 8bd1259d1997ed0f804ea964d515c03a9bdb7ac9 /arch/x86/kvm/svm/nested.c | |
| parent | KVM: SVM: Return -EINVAL instead of MSR_INVALID to signal out-of-range MSR | |
KVM: nSVM: Merge MSRPM in 64-bit chunks on 64-bit kernels
When merging L0 and L1 MSRPMs as part of nested VMRUN emulation, access
the bitmaps using "unsigned long" chunks, i.e. use 8-byte access for
64-bit kernels instead of arbitrarily working on 4-byte chunks.

Opportunistically rename local variables in nested_svm_merge_msrpm() to
more precisely/accurately reflect their purpose ("offset" in particular is
extremely ambiguous).
Link: https://lore.kernel.org/r/20250610225737.156318-30-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
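For readers outside arch/x86/kvm, a minimal userspace sketch of the chunked OR-merge described above; the function name, the caller-supplied size, and the standalone setting are assumptions made here for illustration, not the kernel's API:

```c
#include <stddef.h>

/*
 * Illustrative sketch only: merge two MSR permission bitmaps by OR-ing
 * them together one "unsigned long" at a time, i.e. in 8-byte chunks on
 * a 64-bit build, as the patch describes.
 */
void merge_msrpm(unsigned long *msrpm02,       /* merged bitmap used while L2 runs */
                 const unsigned long *msrpm01, /* L0's bitmap for L1 */
                 const unsigned long *l1_msrpm, /* L1's bitmap, read from guest memory */
                 size_t nbytes)
{
        for (size_t i = 0; i < nbytes / sizeof(unsigned long); i++)
                msrpm02[i] = msrpm01[i] | l1_msrpm[i];
}
```

The real nested_svm_merge_msrpm() performs the same per-chunk OR, but only at the handful of chunk offsets precomputed by nested_svm_init_msrpm_merge_offsets(), and it fetches each L1 chunk with kvm_vcpu_read_guest() instead of dereferencing a host pointer.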
Diffstat (limited to 'arch/x86/kvm/svm/nested.c')
| -rw-r--r-- | arch/x86/kvm/svm/nested.c | 21 |
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7ca45361ced3..749f7b866ac8 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -196,6 +196,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
  */
 static int nested_svm_msrpm_merge_offsets[6] __ro_after_init;
 static int nested_svm_nr_msrpm_merge_offsets __ro_after_init;
+typedef unsigned long nsvm_msrpm_merge_t;
 
 int __init nested_svm_init_msrpm_merge_offsets(void)
 {
@@ -230,10 +231,10 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
                         return -EIO;
 
                 /*
-                 * Merging is done in 32-bit chunks to reduce the number of
-                 * accesses to L1's bitmap.
+                 * Merging is done in chunks to reduce the number of accesses
+                 * to L1's bitmap.
                  */
-                offset = bit_nr / BITS_PER_BYTE / sizeof(u32);
+                offset = bit_nr / BITS_PER_BYTE / sizeof(nsvm_msrpm_merge_t);
 
                 for (j = 0; j < nested_svm_nr_msrpm_merge_offsets; j++) {
                         if (nested_svm_msrpm_merge_offsets[j] == offset)
@@ -261,8 +262,8 @@ int __init nested_svm_init_msrpm_merge_offsets(void)
 static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
-        u32 *msrpm02 = svm->nested.msrpm;
-        u32 *msrpm01 = svm->msrpm;
+        nsvm_msrpm_merge_t *msrpm02 = svm->nested.msrpm;
+        nsvm_msrpm_merge_t *msrpm01 = svm->msrpm;
         int i;
 
         /*
@@ -289,15 +290,15 @@ static bool nested_svm_merge_msrpm(struct kvm_vcpu *vcpu)
 
         for (i = 0; i < nested_svm_nr_msrpm_merge_offsets; i++) {
                 const int p = nested_svm_msrpm_merge_offsets[i];
-                u32 value;
-                u64 offset;
+                nsvm_msrpm_merge_t l1_val;
+                gpa_t gpa;
 
-                offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
+                gpa = svm->nested.ctl.msrpm_base_pa + (p * sizeof(l1_val));
 
-                if (kvm_vcpu_read_guest(vcpu, offset, &value, 4))
+                if (kvm_vcpu_read_guest(vcpu, gpa, &l1_val, sizeof(l1_val)))
                         return false;
 
-                msrpm02[p] = msrpm01[p] | value;
+                msrpm02[p] = msrpm01[p] | l1_val;
         }
 
         svm->nested.force_msr_bitmap_recalc = false;
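As a rough worked example of the offset arithmetic in the new code (the bit number below is made up purely for illustration): with unsigned long chunks on a 64-bit kernel, a bit number maps to chunk bit_nr / 8 / 8, and the guest physical address read from L1's bitmap is msrpm_base_pa plus that chunk index times sizeof(unsigned long).

```c
#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
        /* Hypothetical bit number within the MSR permission bitmap. */
        unsigned long bit_nr = 4660;

        /* Chunk index, mirroring nested_svm_init_msrpm_merge_offsets(). */
        unsigned long offset = bit_nr / BITS_PER_BYTE / sizeof(unsigned long);

        /*
         * Byte offset added to msrpm_base_pa before the kvm_vcpu_read_guest()
         * call in nested_svm_merge_msrpm().
         */
        unsigned long byte_off = offset * sizeof(unsigned long);

        /* Prints "chunk 72, byte offset 576" on a 64-bit build. */
        printf("chunk %lu, byte offset %lu\n", offset, byte_off);
        return 0;
}
```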
