Skip to content

Commit 16fdc1d

Browse files
committed
KVM: SVM: replace regs argument of __svm_vcpu_run() with vcpu_svm
Since registers are reachable through vcpu_svm, and we will need to access more fields of that struct, pass it instead of the regs[] array. No functional change intended.

Cc: stable@vger.kernel.org
Fixes: a149180 ("x86: Add magic AMD return-thunk")
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent debc5a1 commit 16fdc1d

5 files changed

Lines changed: 30 additions & 20 deletions

File tree

arch/x86/kvm/Makefile

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,9 @@ obj-$(CONFIG_KVM) += kvm.o
3535
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
3636
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
3737

38+
AFLAGS_svm/vmenter.o := -iquote $(obj)
39+
$(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
40+
3841
AFLAGS_vmx/vmenter.o := -iquote $(obj)
3942
$(obj)/vmx/vmenter.o: $(obj)/kvm-asm-offsets.h
4043

arch/x86/kvm/kvm-asm-offsets.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,15 @@
88

99
#include <linux/kbuild.h>
1010
#include "vmx/vmx.h"
11+
#include "svm/svm.h"
1112

1213
static void __used common(void)
1314
{
15+
if (IS_ENABLED(CONFIG_KVM_AMD)) {
16+
BLANK();
17+
OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
18+
}
19+
1420
if (IS_ENABLED(CONFIG_KVM_INTEL)) {
1521
BLANK();
1622
OFFSET(VMX_spec_ctrl, vcpu_vmx, spec_ctrl);

arch/x86/kvm/svm/svm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3930,7 +3930,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
39303930
* vmcb02 when switching vmcbs for nested virtualization.
39313931
*/
39323932
vmload(svm->vmcb01.pa);
3933-
__svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
3933+
__svm_vcpu_run(vmcb_pa, svm);
39343934
vmsave(svm->vmcb01.pa);
39353935

39363936
vmload(__sme_page_pa(sd->save_area));

arch/x86/kvm/svm/svm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -684,6 +684,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm);
684684
/* vmenter.S */
685685

686686
void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
687-
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
687+
void __svm_vcpu_run(unsigned long vmcb_pa, struct vcpu_svm *svm);
688688

689689
#endif

arch/x86/kvm/svm/vmenter.S

Lines changed: 19 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4,35 +4,36 @@
44
#include <asm/bitsperlong.h>
55
#include <asm/kvm_vcpu_regs.h>
66
#include <asm/nospec-branch.h>
7+
#include "kvm-asm-offsets.h"
78

89
#define WORD_SIZE (BITS_PER_LONG / 8)
910

1011
/* Intentionally omit RAX as it's context switched by hardware */
11-
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
12-
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
13-
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
12+
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
13+
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
14+
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
1415
/* Intentionally omit RSP as it's context switched by hardware */
15-
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
16-
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
17-
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE
16+
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
17+
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
18+
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)
1819

1920
#ifdef CONFIG_X86_64
20-
#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
21-
#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
22-
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
23-
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
24-
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
25-
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
26-
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
27-
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
21+
#define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
22+
#define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
23+
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
24+
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
25+
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
26+
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
27+
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
28+
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
2829
#endif
2930

3031
.section .noinstr.text, "ax"
3132

3233
/**
3334
* __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
3435
* @vmcb_pa: unsigned long
35-
* @regs: unsigned long * (to guest registers)
36+
* @svm: struct vcpu_svm *
3637
*/
3738
SYM_FUNC_START(__svm_vcpu_run)
3839
push %_ASM_BP
@@ -47,13 +48,13 @@ SYM_FUNC_START(__svm_vcpu_run)
4748
#endif
4849
push %_ASM_BX
4950

50-
/* Save @regs. */
51+
/* Save @svm. */
5152
push %_ASM_ARG2
5253

5354
/* Save @vmcb. */
5455
push %_ASM_ARG1
5556

56-
/* Move @regs to RAX. */
57+
/* Move @svm to RAX. */
5758
mov %_ASM_ARG2, %_ASM_AX
5859

5960
/* Load guest registers. */
@@ -89,7 +90,7 @@ SYM_FUNC_START(__svm_vcpu_run)
8990
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
9091
#endif
9192

92-
/* "POP" @regs to RAX. */
93+
/* "POP" @svm to RAX. */
9394
pop %_ASM_AX
9495

9596
/* Save all guest registers. */

0 commit comments

Comments (0)