Skip to content

Commit 938309b

Browse files
author
Oliver Upton
committed
Merge branch 'kvm-arm64/vgic-lr-overflow' into kvmarm/next
* kvm-arm64/vgic-lr-overflow: (50 commits) : Support for VGIC LR overflows, courtesy of Marc Zyngier : : Address deficiencies in KVM's GIC emulation when a vCPU has more active : IRQs than can be represented in the VGIC list registers. Sort the AP : list to prioritize inactive and pending IRQs, potentially spilling : active IRQs outside of the LRs. : : Handle deactivation of IRQs outside of the LRs for both EOImode=0/1, : which involves special consideration for SPIs being deactivated from a : different vCPU than the one that acked it. KVM: arm64: Convert ICH_HCR_EL2_TDIR cap to EARLY_LOCAL_CPU_FEATURE KVM: arm64: selftests: vgic_irq: Add timer deactivation test KVM: arm64: selftests: vgic_irq: Add Group-0 enable test KVM: arm64: selftests: vgic_irq: Add asymmetric SPI deactivation test KVM: arm64: selftests: vgic_irq: Perform EOImode==1 deactivation in ack order KVM: arm64: selftests: vgic_irq: Remove LR-bound limitation KVM: arm64: selftests: vgic_irq: Exclude timer-controlled interrupts KVM: arm64: selftests: vgic_irq: Change configuration before enabling interrupt KVM: arm64: selftests: vgic_irq: Fix GUEST_ASSERT_IAR_EMPTY() helper KVM: arm64: selftests: gic_v3: Disable Group-0 interrupts by default KVM: arm64: selftests: gic_v3: Add irq group setting helper KVM: arm64: GICv2: Always trap GICV_DIR register KVM: arm64: GICv2: Handle deactivation via GICV_DIR traps KVM: arm64: GICv2: Handle LR overflow when EOImode==0 KVM: arm64: GICv3: Force exit to sync ICH_HCR_EL2.En KVM: arm64: GICv3: nv: Plug L1 LR sync into deactivation primitive KVM: arm64: GICv3: nv: Resync LRs/VMCR/HCR early for better MI emulation KVM: arm64: GICv3: Avoid broadcast kick on CPUs lacking TDIR KVM: arm64: GICv3: Handle in-LR deactivation when possible KVM: arm64: GICv3: Add SPI tracking to handle asymmetric deactivation ... Signed-off-by: Oliver Upton <oupton@kernel.org>
2 parents 11b8e6e + 64d67e7 commit 938309b

34 files changed

Lines changed: 1352 additions & 419 deletions

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,7 @@ enum __kvm_host_smccc_func {
7979
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
8080
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
8181
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
82-
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
82+
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
8383
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
8484
__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
8585
__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,

arch/arm64/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@
5454
#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
5555
#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
5656
#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
57+
#define KVM_REQ_VGIC_PROCESS_UPDATE KVM_ARCH_REQ(11)
5758

5859
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
5960
KVM_DIRTY_LOG_INITIALLY_SET)

arch/arm64/include/asm/kvm_hyp.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,12 +77,13 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
7777
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
7878

7979
u64 __gic_v3_get_lr(unsigned int lr);
80+
void __gic_v3_set_lr(u64 val, int lr);
8081

8182
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
8283
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
8384
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
8485
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
85-
void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
86+
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
8687
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
8788
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
8889

arch/arm64/include/asm/virt.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,13 @@
4040
*/
4141
#define HVC_FINALISE_EL2 3
4242

43+
/*
44+
* HVC_GET_ICH_VTR_EL2 - Retrieve the ICH_VTR_EL2 value
45+
*/
46+
#define HVC_GET_ICH_VTR_EL2 4
47+
4348
/* Max number of HYP stub hypercalls */
44-
#define HVC_STUB_HCALL_NR 4
49+
#define HVC_STUB_HCALL_NR 5
4550

4651
/* Error returned when an invalid stub number is passed into x0 */
4752
#define HVC_STUB_ERR 0xbadca11

arch/arm64/kernel/cpufeature.c

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2303,6 +2303,49 @@ static bool has_gic_prio_relaxed_sync(const struct arm64_cpu_capabilities *entry
23032303
}
23042304
#endif
23052305

2306+
static bool can_trap_icv_dir_el1(const struct arm64_cpu_capabilities *entry,
2307+
int scope)
2308+
{
2309+
static const struct midr_range has_vgic_v3[] = {
2310+
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
2311+
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
2312+
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
2313+
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
2314+
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
2315+
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
2316+
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
2317+
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
2318+
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
2319+
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
2320+
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
2321+
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
2322+
{},
2323+
};
2324+
struct arm_smccc_res res = {};
2325+
2326+
BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV3_CPUIF);
2327+
BUILD_BUG_ON(ARM64_HAS_ICH_HCR_EL2_TDIR <= ARM64_HAS_GICV5_LEGACY);
2328+
if (!this_cpu_has_cap(ARM64_HAS_GICV3_CPUIF) &&
2329+
!is_midr_in_range_list(has_vgic_v3))
2330+
return false;
2331+
2332+
if (!is_hyp_mode_available())
2333+
return false;
2334+
2335+
if (this_cpu_has_cap(ARM64_HAS_GICV5_LEGACY))
2336+
return true;
2337+
2338+
if (is_kernel_in_hyp_mode())
2339+
res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2);
2340+
else
2341+
arm_smccc_1_1_hvc(HVC_GET_ICH_VTR_EL2, &res);
2342+
2343+
if (res.a0 == HVC_STUB_ERR)
2344+
return false;
2345+
2346+
return res.a1 & ICH_VTR_EL2_TDS;
2347+
}
2348+
23062349
#ifdef CONFIG_ARM64_BTI
23072350
static void bti_enable(const struct arm64_cpu_capabilities *__unused)
23082351
{
@@ -2814,6 +2857,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
28142857
.matches = has_gic_prio_relaxed_sync,
28152858
},
28162859
#endif
2860+
{
2861+
/*
2862+
* Depends on having GICv3
2863+
*/
2864+
.desc = "ICV_DIR_EL1 trapping",
2865+
.capability = ARM64_HAS_ICH_HCR_EL2_TDIR,
2866+
.type = ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE,
2867+
.matches = can_trap_icv_dir_el1,
2868+
},
28172869
#ifdef CONFIG_ARM64_E0PD
28182870
{
28192871
.desc = "E0PD",

arch/arm64/kernel/hyp-stub.S

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,11 @@ SYM_CODE_START_LOCAL(elx_sync)
5454
1: cmp x0, #HVC_FINALISE_EL2
5555
b.eq __finalise_el2
5656

57+
cmp x0, #HVC_GET_ICH_VTR_EL2
58+
b.ne 2f
59+
mrs_s x1, SYS_ICH_VTR_EL2
60+
b 9f
61+
5762
2: cmp x0, #HVC_SOFT_RESTART
5863
b.ne 3f
5964
mov x0, x2

arch/arm64/kernel/image-vars.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,7 @@ KVM_NVHE_ALIAS(spectre_bhb_patch_loop_mitigation_enable);
9191
KVM_NVHE_ALIAS(spectre_bhb_patch_wa3);
9292
KVM_NVHE_ALIAS(spectre_bhb_patch_clearbhb);
9393
KVM_NVHE_ALIAS(alt_cb_patch_nops);
94+
KVM_NVHE_ALIAS(kvm_compute_ich_hcr_trap_bits);
9495

9596
/* Global kernel state accessed by nVHE hyp code. */
9697
KVM_NVHE_ALIAS(kvm_vgic_global_state);

arch/arm64/kvm/arm.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -664,8 +664,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
664664
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
665665
{
666666
if (is_protected_kvm_enabled()) {
667-
kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
668-
&vcpu->arch.vgic_cpu.vgic_v3);
667+
kvm_call_hyp(__vgic_v3_save_aprs, &vcpu->arch.vgic_cpu.vgic_v3);
669668
kvm_call_hyp_nvhe(__pkvm_vcpu_put);
670669
}
671670

@@ -1047,6 +1046,10 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
10471046
*/
10481047
kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
10491048

1049+
/* Process interrupts deactivated through a trap */
1050+
if (kvm_check_request(KVM_REQ_VGIC_PROCESS_UPDATE, vcpu))
1051+
kvm_vgic_process_async_update(vcpu);
1052+
10501053
if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
10511054
kvm_update_stolen_time(vcpu);
10521055

arch/arm64/kvm/hyp/nvhe/hyp-main.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,7 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
157157
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
158158

159159
host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
160+
host_cpu_if->vgic_vmcr = hyp_cpu_if->vgic_vmcr;
160161
for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
161162
host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
162163
}
@@ -464,11 +465,11 @@ static void handle___vgic_v3_init_lrs(struct kvm_cpu_context *host_ctxt)
464465
__vgic_v3_init_lrs();
465466
}
466467

467-
static void handle___vgic_v3_save_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
468+
static void handle___vgic_v3_save_aprs(struct kvm_cpu_context *host_ctxt)
468469
{
469470
DECLARE_REG(struct vgic_v3_cpu_if *, cpu_if, host_ctxt, 1);
470471

471-
__vgic_v3_save_vmcr_aprs(kern_hyp_va(cpu_if));
472+
__vgic_v3_save_aprs(kern_hyp_va(cpu_if));
472473
}
473474

474475
static void handle___vgic_v3_restore_vmcr_aprs(struct kvm_cpu_context *host_ctxt)
@@ -616,7 +617,7 @@ static const hcall_t host_hcall[] = {
616617
HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
617618
HANDLE_FUNC(__kvm_flush_cpu_context),
618619
HANDLE_FUNC(__kvm_timer_set_cntvoff),
619-
HANDLE_FUNC(__vgic_v3_save_vmcr_aprs),
620+
HANDLE_FUNC(__vgic_v3_save_aprs),
620621
HANDLE_FUNC(__vgic_v3_restore_vmcr_aprs),
621622
HANDLE_FUNC(__pkvm_reserve_vm),
622623
HANDLE_FUNC(__pkvm_unreserve_vm),

arch/arm64/kvm/hyp/nvhe/pkvm.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -337,6 +337,9 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
337337
/* CTR_EL0 is always under host control, even for protected VMs. */
338338
hyp_vm->kvm.arch.ctr_el0 = host_kvm->arch.ctr_el0;
339339

340+
/* Preserve the vgic model so that GICv3 emulation works */
341+
hyp_vm->kvm.arch.vgic.vgic_model = host_kvm->arch.vgic.vgic_model;
342+
340343
if (test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &host_kvm->arch.flags))
341344
set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
342345

0 commit comments

Comments
 (0)