Commit 8181343

Author: CKI KWF Bot (committed)
Merge: KVM: arm64: pick up fixes up to v6.17
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/7524
JIRA: https://issues.redhat.com/browse/RHEL-115975

This MR picks up the most important fixes in the kvm/arm code up to v6.17. I tried to be rather minimal in my choice of patches, preferring to rework code over pulling in bigger changes. The focus is on code we support, preferring to omit changes to areas we don't support as well as new tests.

Omitted-fix: c35dd83 ("KVM: arm64: Guard PMSCR_EL1 initialization with SPE presence check")
We don't support pkvm, and properly backporting this would need backporting or reimplementing parts of the debug rework like host data, which seems like misplaced effort.

Changes for v4:
- picked several simple post-v6.17 fixes
- added "KVM: arm64: Fix vcpu_{read,write}_sys_reg() accessors" and pre-reqs
- picked "KVM: arm64: Rename the device variable to s2_force_noncacheable" to reduce conflicts

Changes for v5:
- improved the conflict resolution description; one more new fix ("KVM: arm64: vgic-v3: Reinstate IRQ lock ordering for LPI xarray")

Changes for v6:
- minor reshuffle; picked one more fix ("KVM: arm64: vgic-v3: Release reserved slot outside of lpi_xa's lock")

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Approved-by: Eric Auger <eric.auger@redhat.com>
Approved-by: Sebastian Ott <sebott@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
2 parents: eb660ac + b129e5c (commit 8181343)

35 files changed: +627, -385 lines

arch/arm64/include/asm/kvm_host.h

Lines changed: 8 additions & 3 deletions
@@ -984,6 +984,7 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 
 #define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))
 
+u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
 u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
 #define __vcpu_sys_reg(v,r) \
 	(*({ \
@@ -994,8 +995,8 @@ u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
 		__r; \
 	}))
 
-u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
-void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);
 
 static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 {
@@ -1005,6 +1006,8 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
 	 * System registers listed in the switch are not saved on every
 	 * exit from the guest but are only saved on vcpu_put.
 	 *
+	 * SYSREGS_ON_CPU *MUST* be checked before using this helper.
+	 *
 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
 	 * should never be listed below, because the guest cannot modify its
 	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
@@ -1055,6 +1058,8 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
 	 * System registers listed in the switch are not restored on every
 	 * entry to the guest but are only restored on vcpu_load.
 	 *
+	 * SYSREGS_ON_CPU *MUST* be checked before using this helper.
+	 *
 	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
 	 * should never be listed below, because the MPIDR should only be set
 	 * once, before running the VCPU, and never changed later.
@@ -1302,6 +1307,7 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
 void kvm_arm_init_debug(void);
+void kvm_debug_init_vhe(void);
 void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
@@ -1325,7 +1331,6 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
 					struct reg_mask_range *range);
 
 /* Guest/host FPSIMD coordination helpers */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
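
The two new "*MUST*" comments document a contract rather than enforce one: the per-register switch helpers silently return stale data if the vcpu's sysregs are not currently loaded on the CPU. A minimal sketch of the calling pattern the comments demand (illustrative, not a verbatim copy of the reworked sys_regs.c accessors):

/* Sketch: consult the CPU copy of a sysreg only when the SYSREGS_ON_CPU
 * flag says this vcpu's state is actually loaded on this CPU. */
static u64 example_read(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
{
        u64 val;

        if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
            __vcpu_read_sys_reg_from_cpu(reg, &val))
                return val;

        return __vcpu_sys_reg(vcpu, reg); /* fall back to the in-memory copy */
}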

arch/arm64/include/asm/memory.h

Lines changed: 4 additions & 1 deletion
@@ -143,13 +143,16 @@
 
 #define OVERFLOW_STACK_SIZE	SZ_4K
 
+#define NVHE_STACK_SHIFT	PAGE_SHIFT
+#define NVHE_STACK_SIZE		(UL(1) << NVHE_STACK_SHIFT)
+
 /*
  * With the minimum frame size of [x29, x30], exactly half the combined
  * sizes of the hyp and overflow stacks is the maximum size needed to
  * save the unwinded stacktrace; plus an additional entry to delimit the
  * end.
  */
-#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
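
The new constants exist so the hyp stack size is derived from NVHE_STACK_SHIFT in one place instead of being hard-wired to a page. A sketch of how the pieces relate (assuming 4K pages; simplified, not kernel code):

/* Assuming PAGE_SHIFT == 12 (4K pages). */
#define PAGE_SHIFT       12
#define NVHE_STACK_SHIFT PAGE_SHIFT                /* still one page today */
#define NVHE_STACK_SIZE  (1UL << NVHE_STACK_SHIFT) /* 4096 bytes */

/* The allocation order handed to __get_free_pages() in init_hyp_mode() is
 * NVHE_STACK_SHIFT - PAGE_SHIFT == 0 (order-0, one page) for now; growing
 * the stack later means changing only NVHE_STACK_SHIFT. */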

arch/arm64/include/asm/stacktrace/nvhe.h

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 
 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
 
arch/arm64/kvm/arm.c

Lines changed: 24 additions & 17 deletions
@@ -59,7 +59,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
 
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -725,7 +725,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF | HCR_VSE);
+
 	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
 		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
 }
@@ -810,10 +811,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;
 
-	ret = kvm_arch_vcpu_run_map_fp(vcpu);
-	if (ret)
-		return ret;
-
 	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
@@ -2078,8 +2075,10 @@ static void cpu_hyp_init_features(void)
 	cpu_set_hyp_vector();
 	kvm_arm_init_debug();
 
-	if (is_kernel_in_hyp_mode())
+	if (is_kernel_in_hyp_mode()) {
 		kvm_timer_init_vhe();
+		kvm_debug_init_vhe();
+	}
 
 	if (vgic_present)
 		kvm_vgic_init_cpu_hardware();
@@ -2102,7 +2101,7 @@ static void cpu_hyp_init(void *discard)
 
 static void cpu_hyp_uninit(void *discard)
 {
-	if (__this_cpu_read(kvm_hyp_initialized)) {
+	if (!is_protected_kvm_enabled() && __this_cpu_read(kvm_hyp_initialized)) {
 		cpu_hyp_reset();
 		__this_cpu_write(kvm_hyp_initialized, 0);
 	}
@@ -2311,15 +2310,23 @@ static void __init teardown_hyp_mode(void)
 
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+		if (per_cpu(kvm_hyp_initialized, cpu))
+			continue;
+
+		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
+
+		if (!kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu])
+			continue;
 
 		if (free_sve) {
 			struct cpu_sve_state *sve_state;
 
 			sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
 			free_pages((unsigned long) sve_state, pkvm_host_sve_state_order());
 		}
+
+		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
+
 	}
 }
 
@@ -2498,15 +2505,15 @@ static int __init init_hyp_mode(void)
 	 * Allocate stack pages for Hypervisor-mode
 	 */
 	for_each_possible_cpu(cpu) {
-		unsigned long stack_page;
+		unsigned long stack_base;
 
-		stack_page = __get_free_page(GFP_KERNEL);
-		if (!stack_page) {
+		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+		if (!stack_base) {
 			err = -ENOMEM;
 			goto out_err;
 		}
 
-		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
 	}
 
 	/*
@@ -2575,9 +2582,9 @@
 	 */
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
 
-		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
+		err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2589,7 +2596,7 @@
 		 * __hyp_pa() won't do the right thing there, since the stack
 		 * has been mapped in the flexible private VA space.
 		 */
-		params->stack_pa = __pa(stack_page);
+		params->stack_pa = __pa(stack_base);
 	}
 
 	for_each_possible_cpu(cpu) {
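
The reshuffled teardown loop encodes two ordering rules that are easy to miss in the diff. A comment-only sketch of my reading of the hunk (not authoritative):

/*
 * 1) CPUs whose hyp state is still initialized are skipped entirely;
 *    with protected KVM their pages belong to the hypervisor and must
 *    not be handed back to the buddy allocator.
 *
 * 2) sve_state is reached *through* the nVHE per-CPU region:
 *        sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state;
 *    kvm_host_data lives inside kvm_arm_hyp_percpu_base[cpu], so that
 *    region may only be freed after sve_state has been read and freed,
 *    which is why free_pages(...percpu_base..., nvhe_percpu_order())
 *    moved to the end of the loop.
 */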

arch/arm64/kvm/debug.c

Lines changed: 7 additions & 0 deletions
@@ -136,6 +136,13 @@ void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
 	preempt_enable();
 }
 
+void kvm_debug_init_vhe(void)
+{
+	/* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */
+	if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1)))
+		write_sysreg_el1(0, SYS_PMSCR);
+}
+
 /**
  * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
  * @vcpu: the vcpu pointer
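
kvm_debug_init_vhe() gates the write because PMSCR_EL1 only exists when the Statistical Profiling Extension is implemented, which ID_AA64DFR0_EL1.PMSVer advertises. A standalone sketch of what the SYS_FIELD_GET() line boils down to (field position per the Arm ARM; an illustration, not the kernel macro):

#include <stdint.h>

/* ID_AA64DFR0_EL1.PMSVer is bits [35:32]; non-zero means SPE is present. */
static inline unsigned int pmsver(uint64_t id_aa64dfr0)
{
        return (id_aa64dfr0 >> 32) & 0xf;
}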

arch/arm64/kvm/fpsimd.c

Lines changed: 0 additions & 26 deletions
@@ -14,32 +14,6 @@
 #include <asm/kvm_mmu.h>
 #include <asm/sysreg.h>
 
-/*
- * Called on entry to KVM_RUN unless this vcpu previously ran at least
- * once and the most recent prior KVM_RUN for this vcpu was called from
- * the same task as current (highly likely).
- *
- * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
- * such that on entering hyp the relevant parts of current are already
- * mapped.
- */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
-{
-	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
-	int ret;
-
-	/* pKVM has its own tracking of the host fpsimd state. */
-	if (is_protected_kvm_enabled())
-		return 0;
-
-	/* Make sure the host task fpsimd state is visible to hyp: */
-	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 /*
  * Prepare vcpu for saving the host's FPSIMD state and loading the guest's.
  * The actual loading is done by the FPSIMD access trap taken to hyp.

arch/arm64/kvm/hyp/exception.c

Lines changed: 6 additions & 12 deletions
@@ -22,50 +22,44 @@
 
 static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
 {
-	u64 val;
-
-	if (unlikely(vcpu_has_nv(vcpu)))
+	if (has_vhe())
 		return vcpu_read_sys_reg(vcpu, reg);
-	else if (__vcpu_read_sys_reg_from_cpu(reg, &val))
-		return val;
 
 	return __vcpu_sys_reg(vcpu, reg);
 }
 
 static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
 {
-	if (unlikely(vcpu_has_nv(vcpu)))
+	if (has_vhe())
 		vcpu_write_sys_reg(vcpu, val, reg);
-	else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
+	else
 		__vcpu_sys_reg(vcpu, reg) = val;
 }
 
 static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
 			      u64 val)
 {
-	if (unlikely(vcpu_has_nv(vcpu))) {
+	if (has_vhe()) {
 		if (target_mode == PSR_MODE_EL1h)
 			vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
 		else
 			vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
-	} else if (has_vhe()) {
-		write_sysreg_el1(val, SYS_SPSR);
 	} else {
 		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
 	}
 }
 
 static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
 {
-	if (has_vhe())
+	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
 		write_sysreg(val, spsr_abt);
 	else
 		vcpu->arch.ctxt.spsr_abt = val;
 }
 
 static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
 {
-	if (has_vhe())
+	if (has_vhe() && vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
 		write_sysreg(val, spsr_und);
 	else
 		vcpu->arch.ctxt.spsr_und = val;

arch/arm64/kvm/hyp/include/nvhe/trap_handler.h

Lines changed: 2 additions & 1 deletion
@@ -12,7 +12,8 @@
 #include <asm/kvm_host.h>
 
 #define cpu_reg(ctxt, r)	(ctxt)->regs.regs[r]
-#define DECLARE_REG(type, name, ctxt, reg)	\
+#define DECLARE_REG(type, name, ctxt, reg)	\
+	__always_unused int ___check_reg_ ## reg;	\
 	type name = (type)cpu_reg(ctxt, (reg))
 
 #endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */
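
The pasted ___check_reg_<reg> declaration gives DECLARE_REG() a compile-time tripwire: each use declares a dummy variable named after its register index, so reusing an index in the same handler (a classic copy-paste slip) becomes a redefinition error instead of a silent wrong-register read. A hedged illustration (handle_example is made up):

/* Hypothetical nVHE hypercall handler, for illustration only. */
static void handle_example(struct kvm_cpu_context *host_ctxt)
{
        DECLARE_REG(unsigned long, addr, host_ctxt, 1); /* declares ___check_reg_1 */
        DECLARE_REG(unsigned long, size, host_ctxt, 1); /* bug: redeclares
                                                         * ___check_reg_1 and fails
                                                         * to compile; reg 2 is fine */
}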

arch/arm64/kvm/hyp/nvhe/host.S

Lines changed: 2 additions & 2 deletions
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
 
 /*
  * Test whether the SP has overflowed, without corrupting a GPR.
- * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+ * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
  * of SP should always be 1.
  */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
-	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
 	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
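In C terms the assembly tests a single bit: hyp stacks are laid out so that a valid SP always has bit NVHE_STACK_SHIFT set (guard page below, stack above), and overflowing into the guard clears it. A rough equivalent of the check (sketch only; the real code must not clobber a general-purpose register, hence the add/sub dance around tbz):

#include <stdbool.h>

/* Sketch, assuming NVHE_STACK_SHIFT from asm/memory.h is in scope. */
static bool hyp_sp_overflowed(unsigned long sp)
{
        return !(sp & (1UL << NVHE_STACK_SHIFT)); /* bit clear => in guard page */
}
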
arch/arm64/kvm/hyp/nvhe/list_debug.c

Lines changed: 1 addition & 1 deletion
@@ -17,7 +17,7 @@ static inline __must_check bool nvhe_check_data_corruption(bool v)
 		bool corruption = unlikely(condition);			\
 		if (corruption) {					\
 			if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) {\
-				BUG_ON(1);				\
+				BUG();					\
 			} else						\
 				WARN_ON(1);				\
 		}							\
