
Commit 1e8c597

codomania authored and suryasaimadhu committed
x86/mm/cpa: Generalize __set_memory_enc_pgtable()
The kernel provides infrastructure to set or clear the encryption mask
from the pages for AMD SEV, but TDX requires a few tweaks:

- TDX and SEV have different requirements for cache and TLB flushing.

- TDX has its own routine to notify the VMM about page encryption
  status changes.

Modify __set_memory_enc_pgtable() and make it flexible enough to cover
both AMD SEV and Intel TDX. The AMD-specific behavior is isolated in
the callbacks under x86_platform.guest. TDX will provide its own
version of said callbacks.

  [ bp: Beat into submission. ]

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Link: https://lore.kernel.org/r/20220223043528.2093214-1-brijesh.singh@amd.com
1 parent b577f54 commit 1e8c597

5 files changed

Lines changed: 91 additions & 34 deletions


arch/x86/include/asm/set_memory.h

Lines changed: 0 additions & 1 deletion
@@ -84,7 +84,6 @@ int set_pages_rw(struct page *page, int numpages);
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
 
 extern int kernel_set_to_readonly;

arch/x86/include/asm/x86_init.h

Lines changed: 16 additions & 0 deletions
@@ -141,6 +141,21 @@ struct x86_init_acpi {
 	void (*reduced_hw_early_init)(void);
 };
 
+/**
+ * struct x86_guest - Functions used by misc guest incarnations like SEV, TDX, etc.
+ *
+ * @enc_status_change_prepare	Notify HV before the encryption status of a range is changed
+ * @enc_status_change_finish	Notify HV after the encryption status of a range is changed
+ * @enc_tlb_flush_required	Returns true if a TLB flush is needed before changing page encryption status
+ * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
+ */
+struct x86_guest {
+	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+	bool (*enc_tlb_flush_required)(bool enc);
+	bool (*enc_cache_flush_required)(void);
+};
+
 /**
  * struct x86_init_ops - functions for platform specific setup
  *
@@ -287,6 +302,7 @@ struct x86_platform_ops {
 	struct x86_legacy_features legacy;
 	void (*set_legacy_features)(void);
 	struct x86_hyper_runtime hyper;
+	struct x86_guest guest;
 };
 
 struct x86_apic_ops {
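The struct above is the whole generalization: each guest flavor fills in only the hooks it needs at early init. As an illustration (not part of this commit, which only states that TDX will provide its own callbacks), a flavor could install overrides like the sketch below; every tdx_* symbol is hypothetical:

	/*
	 * Hypothetical sketch of how a guest flavor overrides the defaults.
	 * None of these tdx_* symbols exist in this commit.
	 */
	static bool tdx_enc_status_changed(unsigned long vaddr, int npages, bool enc)
	{
		/* Tell the VMM which range changed between private and shared. */
		return true;	/* success; a false return becomes -EIO for the caller */
	}

	static bool tdx_cache_flush_required(void)
	{
		return false;	/* assumed policy, for illustration only */
	}

	void __init tdx_early_init(void)
	{
		x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;
		x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
	}

Hooks that are not overridden keep the no-op defaults installed in x86_init.c below.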

arch/x86/kernel/x86_init.c

Lines changed: 14 additions & 2 deletions
@@ -129,6 +129,11 @@ struct x86_cpuinit_ops x86_cpuinit = {
 
 static void default_nmi_init(void) { };
 
+static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { }
+static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; }
+static bool enc_tlb_flush_required_noop(bool enc) { return false; }
+static bool enc_cache_flush_required_noop(void) { return false; }
+
 struct x86_platform_ops x86_platform __ro_after_init = {
 	.calibrate_cpu			= native_calibrate_cpu_early,
 	.calibrate_tsc			= native_calibrate_tsc,
@@ -138,9 +143,16 @@ struct x86_platform_ops x86_platform __ro_after_init = {
 	.is_untracked_pat_range		= is_ISA_range,
 	.nmi_init			= default_nmi_init,
 	.get_nmi_reason			= default_get_nmi_reason,
-	.save_sched_clock_state = tsc_save_sched_clock_state,
-	.restore_sched_clock_state = tsc_restore_sched_clock_state,
+	.save_sched_clock_state		= tsc_save_sched_clock_state,
+	.restore_sched_clock_state	= tsc_restore_sched_clock_state,
 	.hyper.pin_vcpu			= x86_op_int_noop,
+
+	.guest = {
+		.enc_status_change_prepare	= enc_status_change_prepare_noop,
+		.enc_status_change_finish	= enc_status_change_finish_noop,
+		.enc_tlb_flush_required		= enc_tlb_flush_required_noop,
+		.enc_cache_flush_required	= enc_cache_flush_required_noop,
+	},
 };
 
 EXPORT_SYMBOL_GPL(x86_platform);
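Design note: x86_platform.guest is always populated with these no-op callbacks, so __set_memory_enc_pgtable() (arch/x86/mm/pat/set_memory.c below) can invoke the hooks unconditionally, with no NULL checks or #ifdefs at the call sites.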

arch/x86/mm/mem_encrypt_amd.c

Lines changed: 50 additions & 22 deletions
@@ -177,25 +177,6 @@ void __init sme_map_bootdata(char *real_mode_data)
 	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
 }
 
-void __init sme_early_init(void)
-{
-	unsigned int i;
-
-	if (!sme_me_mask)
-		return;
-
-	early_pmd_flags = __sme_set(early_pmd_flags);
-
-	__supported_pte_mask = __sme_set(__supported_pte_mask);
-
-	/* Update the protection map with memory encryption mask */
-	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
-		protection_map[i] = pgprot_encrypted(protection_map[i]);
-
-	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-		swiotlb_force = SWIOTLB_FORCE;
-}
-
 void __init sev_setup_arch(void)
 {
 	phys_addr_t total_mem = memblock_phys_mem_size();
@@ -256,7 +237,17 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 	return pfn;
 }
 
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+static bool amd_enc_tlb_flush_required(bool enc)
+{
+	return true;
+}
+
+static bool amd_enc_cache_flush_required(void)
+{
+	return !cpu_feature_enabled(X86_FEATURE_SME_COHERENT);
+}
+
+static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
 #ifdef CONFIG_PARAVIRT
 	unsigned long sz = npages << PAGE_SHIFT;
@@ -287,6 +278,19 @@ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
 #endif
 }
 
+static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+{
+}
+
+/* Return true unconditionally: return value doesn't matter for the SEV side */
+static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
+{
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+		enc_dec_hypercall(vaddr, npages, enc);
+
+	return true;
+}
+
 static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 {
 	pgprot_t old_prot, new_prot;
@@ -392,7 +396,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
-	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+	early_set_mem_enc_dec_hypercall(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
 out:
 	__flush_tlb_all();
 	return ret;
@@ -410,7 +414,31 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 
 void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
-	notify_range_enc_status_changed(vaddr, npages, enc);
+	enc_dec_hypercall(vaddr, npages, enc);
+}
+
+void __init sme_early_init(void)
+{
+	unsigned int i;
+
+	if (!sme_me_mask)
+		return;
+
+	early_pmd_flags = __sme_set(early_pmd_flags);
+
+	__supported_pte_mask = __sme_set(__supported_pte_mask);
+
+	/* Update the protection map with memory encryption mask */
+	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+		protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		swiotlb_force = SWIOTLB_FORCE;
+
+	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
+	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
+	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
+	x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
 }
 
 void __init mem_encrypt_free_decrypted_mem(void)

arch/x86/mm/pat/set_memory.c

Lines changed: 11 additions & 9 deletions
@@ -2008,10 +2008,12 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	kmap_flush_unused();
 	vm_unmap_aliases();
 
-	/*
-	 * Before changing the encryption attribute, we need to flush caches.
-	 */
-	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
+	/* Flush the caches as needed before changing the encryption attribute. */
+	if (x86_platform.guest.enc_tlb_flush_required(enc))
+		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
+
+	/* Notify hypervisor that we are about to set/clr encryption attribute. */
+	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2024,11 +2026,11 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
-	/*
-	 * Notify hypervisor that a given memory range is mapped encrypted
-	 * or decrypted.
-	 */
-	notify_range_enc_status_changed(addr, numpages, enc);
+	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
+	if (!ret) {
+		if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+			ret = -EIO;
+	}
 
 	return ret;
 }
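For context, this path is what a driver hits when it shares memory with the host. A minimal sketch, assuming a page-aligned buffer in the kernel direct map (the function below is illustrative, not from this commit):

	#include <linux/set_memory.h>

	/*
	 * Share one page with the hypervisor: clears the encryption attribute
	 * in the page tables, flushing caches/TLB and notifying the HV via
	 * the x86_platform.guest callbacks wired up above.
	 */
	static int share_page_with_host(void *buf)
	{
		return set_memory_decrypted((unsigned long)buf, 1);
	}

On SEV this ends in enc_dec_hypercall() telling the VMM the page is now shared. A failure reported by the finish callback surfaces to the caller as -EIO; the SEV callback always returns true, so that error path is reserved for future guest flavors.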
