diff options
| author | Sean Christopherson <seanjc@google.com> | 2026-02-24 17:20:49 -0800 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2026-03-02 16:06:49 -0800 |
| commit | e2138c4a5be1e50d75281136bdc3e709cb07ec5e (patch) | |
| tree | 3bbb0777a0250c90de08b6a6a6954e00003e2931 | |
| parent | 4f09e62afcd6c7a2c3428a3453ced7e56475dc70 (diff) | |
| download | linux-e2138c4a5be1e50d75281136bdc3e709cb07ec5e.tar.gz linux-e2138c4a5be1e50d75281136bdc3e709cb07ec5e.zip | |
KVM: x86: Add helpers to prepare kvm_run for userspace MMIO exit
Add helpers to fill kvm_run for userspace MMIO exits to deduplicate a
variety of code, and to allow for a cleaner return path in
emulator_read_write().
Opportunistically add a KVM_BUG_ON() to ensure the caller is limiting the
length of a single MMIO access to 8 bytes (the largest size userspace is
prepared to handle, as the ABI was baked before things like MOVDQ came
along).
No functional change intended.
Cc: Rick Edgecombe <rick.p.edgecombe@intel.com>
Cc: Binbin Wu <binbin.wu@linux.intel.com>
Cc: Xiaoyao Li <xiaoyao.li@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Michael Roth <michael.roth@amd.com>
Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Tested-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Link: https://patch.msgid.link/20260225012049.920665-15-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
| -rw-r--r-- | arch/x86/kvm/vmx/tdx.c | 12 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.c | 42 | ||||
| -rw-r--r-- | arch/x86/kvm/x86.h | 26 |
3 files changed, 37 insertions, 43 deletions
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index c5065f84b78b..5e9b0c4d9af6 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1467,17 +1467,11 @@ static int tdx_emulate_mmio(struct kvm_vcpu *vcpu)
 	/* Request the device emulation to userspace device model. */
 	vcpu->mmio_is_write = write;
-	if (!write)
-		vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
-	vcpu->run->mmio.phys_addr = gpa;
-	vcpu->run->mmio.len = size;
-	vcpu->run->mmio.is_write = write;
-	vcpu->run->exit_reason = KVM_EXIT_MMIO;
+	__kvm_prepare_emulated_mmio_exit(vcpu, gpa, size, &val, write);
 
-	if (write) {
-		memcpy(vcpu->run->mmio.data, &val, size);
-	} else {
+	if (!write) {
+		vcpu->arch.complete_userspace_io = tdx_complete_mmio_read;
 		vcpu->mmio_fragments[0].gpa = gpa;
 		vcpu->mmio_fragments[0].len = size;
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, size, gpa, NULL);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1467652ceabc..8cb6b1f1916e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8209,7 +8209,6 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
 			       const struct read_write_emulator_ops *ops)
 {
 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-	struct kvm_mmio_fragment *frag;
 	int rc;
 
 	if (WARN_ON_ONCE((bytes > 8u || !ops->write) && object_is_on_stack(val)))
@@ -8267,12 +8266,9 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_cur_fragment = 0;
+	vcpu->mmio_is_write = ops->write;
 
-	frag = &vcpu->mmio_fragments[0];
-	vcpu->run->mmio.len = min(8u, frag->len);
-	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
-	vcpu->run->exit_reason = KVM_EXIT_MMIO;
-	vcpu->run->mmio.phys_addr = frag->gpa;
+	kvm_prepare_emulated_mmio_exit(vcpu, &vcpu->mmio_fragments[0]);
 
 	/*
 	 * For MMIO reads, stop emulating and immediately exit to userspace, as
@@ -8282,11 +8278,7 @@ static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
 	 * after completing emulation (see the check on vcpu->mmio_needed in
 	 * x86_emulate_instruction()).
 	 */
-	if (!ops->write)
-		return X86EMUL_IO_NEEDED;
-
-	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
-	return X86EMUL_CONTINUE;
+	return ops->write ? X86EMUL_CONTINUE : X86EMUL_IO_NEEDED;
 }
 
 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -11883,12 +11875,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 		return complete_emulated_io(vcpu);
 	}
 
-	run->exit_reason = KVM_EXIT_MMIO;
-	run->mmio.phys_addr = frag->gpa;
-	if (vcpu->mmio_is_write)
-		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
-	run->mmio.len = min(8u, frag->len);
-	run->mmio.is_write = vcpu->mmio_is_write;
+	kvm_prepare_emulated_mmio_exit(vcpu, frag);
 	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
 	return 0;
 }
@@ -14295,15 +14282,8 @@ static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
 	}
 
 	// More MMIO is needed
-	run->mmio.phys_addr = frag->gpa;
-	run->mmio.len = min(8u, frag->len);
-	run->mmio.is_write = vcpu->mmio_is_write;
-	if (run->mmio.is_write)
-		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
-	run->exit_reason = KVM_EXIT_MMIO;
-
+	kvm_prepare_emulated_mmio_exit(vcpu, frag);
 	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
 	return 0;
 }
 
@@ -14332,23 +14312,17 @@ int kvm_sev_es_mmio(struct kvm_vcpu *vcpu, bool is_write, gpa_t gpa,
 	 * requests that split a page boundary.
 	 */
 	frag = vcpu->mmio_fragments;
-	vcpu->mmio_nr_fragments = 1;
 	frag->len = bytes;
 	frag->gpa = gpa;
 	frag->data = data;
 
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_cur_fragment = 0;
+	vcpu->mmio_nr_fragments = 1;
+	vcpu->mmio_is_write = is_write;
 
-	vcpu->run->mmio.phys_addr = gpa;
-	vcpu->run->mmio.len = min(8u, frag->len);
-	vcpu->run->mmio.is_write = is_write;
-	if (is_write)
-		memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
-	vcpu->run->exit_reason = KVM_EXIT_MMIO;
-
+	kvm_prepare_emulated_mmio_exit(vcpu, frag);
 	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
-
 	return 0;
 }
 EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1d0f0edd31b3..44a28d343d40 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -718,6 +718,32 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 			 unsigned int port, void *data, unsigned int count,
 			 int in);
 
+static inline void __kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
+						    gpa_t gpa, unsigned int len,
+						    const void *data,
+						    bool is_write)
+{
+	struct kvm_run *run = vcpu->run;
+
+	KVM_BUG_ON(len > 8, vcpu->kvm);
+
+	run->mmio.len = len;
+	run->mmio.is_write = is_write;
+	run->exit_reason = KVM_EXIT_MMIO;
+	run->mmio.phys_addr = gpa;
+	if (is_write)
+		memcpy(run->mmio.data, data, len);
+}
+
+static inline void kvm_prepare_emulated_mmio_exit(struct kvm_vcpu *vcpu,
+						  struct kvm_mmio_fragment *frag)
+{
+	WARN_ON_ONCE(!vcpu->mmio_needed || !vcpu->mmio_nr_fragments);
+
+	__kvm_prepare_emulated_mmio_exit(vcpu, frag->gpa, min(8u, frag->len),
+					 frag->data, vcpu->mmio_is_write);
+}
+
 static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
 {
 	return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
