summaryrefslogtreecommitdiffstats
path: root/arch/s390/kvm/faultin.c
blob: e37cd18200f530530e33ebf6f8800becfd204289 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
// SPDX-License-Identifier: GPL-2.0
/*
 *  KVM guest fault handling.
 *
 *    Copyright IBM Corp. 2025
 *    Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 */
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>

#include "gmap.h"
#include "trace.h"
#include "faultin.h"

bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu);

/**
 * kvm_s390_faultin_gfn() - handle a DAT fault.
 * @vcpu: The vCPU whose gmap is to be fixed up, or NULL if operating on the VM.
 * @kvm: The VM whose gmap is to be fixed up, or NULL if operating on a vCPU.
 * @f: The guest fault that needs to be resolved.
 *
 * Return:
 * * 0 on success
 * * < 0 in case of error
 * * > 0 in case of guest exceptions
 *
 * Context:
 * * The mm lock must not be held before calling
 * * kvm->srcu must be held
 * * may sleep
 */
int kvm_s390_faultin_gfn(struct kvm_vcpu *vcpu, struct kvm *kvm, struct guest_fault *f)
{
	/* Freed automatically on every return path via the __free() cleanup. */
	struct kvm_s390_mmu_cache *local_mc __free(kvm_s390_mmu_cache) = NULL;
	struct kvm_s390_mmu_cache *mc = NULL;
	struct kvm_memory_slot *slot;
	unsigned long inv_seq;
	int foll, rc = 0;

	/*
	 * FOLL_NOWAIT makes __kvm_faultin_pfn() fail fast with
	 * KVM_PFN_ERR_NEEDS_IO instead of blocking, so an async pfault can be
	 * attempted below.
	 */
	foll = f->write_attempt ? FOLL_WRITE : 0;
	foll |= f->attempt_pfault ? FOLL_NOWAIT : 0;

	if (vcpu) {
		/* vCPU-scoped call: derive the VM and reuse the per-vCPU MMU cache. */
		kvm = vcpu->kvm;
		mc = vcpu->arch.mc;
	}

	lockdep_assert_held(&kvm->srcu);

	/* Fast path: a minor fault may be fixable under the read lock alone. */
	scoped_guard(read_lock, &kvm->mmu_lock) {
		if (gmap_try_fixup_minor(kvm->arch.gmap, f) == 0)
			return 0;
	}

	while (1) {
		f->valid = false;
		/* Snapshot the invalidation sequence before faulting the page in. */
		inv_seq = kvm->mmu_invalidate_seq;
		/* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
		smp_rmb();

		if (vcpu)
			slot = kvm_vcpu_gfn_to_memslot(vcpu, f->gfn);
		else
			slot = gfn_to_memslot(kvm, f->gfn);
		f->pfn = __kvm_faultin_pfn(slot, f->gfn, foll, &f->writable, &f->page);

		/* Needs I/O, try to setup async pfault (only possible with FOLL_NOWAIT). */
		if (f->pfn == KVM_PFN_ERR_NEEDS_IO) {
			if (unlikely(!f->attempt_pfault))
				return -EAGAIN;
			/* Async pfaults require a vCPU context to inject into. */
			if (unlikely(!vcpu))
				return -EINVAL;
			trace_kvm_s390_major_guest_pfault(vcpu);
			if (kvm_arch_setup_async_pf(vcpu))
				return 0;
			vcpu->stat.pfault_sync++;
			/* Could not setup async pfault, try again synchronously. */
			foll &= ~FOLL_NOWAIT;
			f->pfn = __kvm_faultin_pfn(slot, f->gfn, foll, &f->writable, &f->page);
		}

		/* Access outside memory, addressing exception. */
		if (is_noslot_pfn(f->pfn))
			return PGM_ADDRESSING;
		/* Signal pending: try again. */
		if (f->pfn == KVM_PFN_ERR_SIGPENDING)
			return -EAGAIN;
		/* Check if it's read-only memory; don't try to actually handle that case. */
		if (f->pfn == KVM_PFN_ERR_RO_FAULT)
			return -EOPNOTSUPP;
		/* Any other error. */
		if (is_error_pfn(f->pfn))
			return -EFAULT;

		/* VM-scoped call has no per-vCPU cache; allocate a local one once. */
		if (!mc) {
			local_mc = kvm_s390_new_mmu_cache();
			if (!local_mc)
				return -ENOMEM;
			mc = local_mc;
		}

		/* Loop, will automatically release the faulted page. */
		if (mmu_invalidate_retry_gfn_unsafe(kvm, inv_seq, f->gfn)) {
			kvm_release_faultin_page(kvm, f->page, true, false);
			continue;
		}

		/* Re-check the sequence under the lock before linking the page. */
		scoped_guard(read_lock, &kvm->mmu_lock) {
			if (!mmu_invalidate_retry_gfn(kvm, inv_seq, f->gfn)) {
				f->valid = true;
				rc = gmap_link(mc, kvm->arch.gmap, f);
				kvm_release_faultin_page(kvm, f->page, !!rc, f->write_attempt);
				/* Mark the page as consumed so it is not released again. */
				f->page = NULL;
			}
		}
		/* No-op if the page was consumed above (f->page == NULL). */
		kvm_release_faultin_page(kvm, f->page, true, false);

		if (rc == -ENOMEM) {
			/* gmap_link() ran out of cached pages: top up and retry. */
			rc = kvm_s390_mmu_cache_topup(mc);
			if (rc)
				return rc;
		} else if (rc != -EAGAIN) {
			/* Success or a hard error; -EAGAIN loops for another attempt. */
			return rc;
		}
	}
}

/**
 * kvm_s390_get_guest_page() - fault in a single guest page.
 * @kvm: The VM the page belongs to.
 * @f: The guest fault descriptor to fill in.
 * @gfn: The guest frame number of the page to fault in.
 * @w: Whether write access is requested.
 *
 * Return:
 * * 0 on success (@f is marked valid and holds the faulted-in page)
 * * PGM_ADDRESSING if @gfn lies outside guest memory
 * * -EINTR if a signal is pending
 * * -EAGAIN if resolving the fault would require I/O
 * * -EFAULT for any other fault-in error
 */
int kvm_s390_get_guest_page(struct kvm *kvm, struct guest_fault *f, gfn_t gfn, bool w)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
	int flags = 0;

	if (w)
		flags |= FOLL_WRITE;

	f->gfn = gfn;
	f->write_attempt = w;
	f->pfn = __kvm_faultin_pfn(memslot, gfn, flags, &f->writable, &f->page);

	/* Specific pfn sentinels first; is_error_pfn() catches the remainder. */
	if (is_noslot_pfn(f->pfn))
		return PGM_ADDRESSING;
	if (is_sigpending_pfn(f->pfn))
		return -EINTR;
	if (f->pfn == KVM_PFN_ERR_NEEDS_IO)
		return -EAGAIN;
	if (is_error_pfn(f->pfn))
		return -EFAULT;

	f->valid = true;
	return 0;
}