109 changes: 109 additions & 0 deletions arch/x86/kernel/head64.c
@@ -318,6 +318,115 @@ unsigned long __head __startup_64(unsigned long physaddr,
return sme_postprocess_startup(bp, pmd);
}

#ifdef CONFIG_AMD_MEM_ENCRYPT

extern bool bsp_flush_bss_decrypted_section_handled;

/* Get CPUID data through GHCB MSR protocol */
static int __cpuid_msr_protocol(u32 fn, int reg_idx, u32 *reg)
{
unsigned int msr_idx = (unsigned int)MSR_AMD64_SEV_ES_GHCB;
struct msr m;

m.q = GHCB_CPUID_REQ(fn, reg_idx);

asm volatile("wrmsr" : : "c" (msr_idx), "a"(m.l), "d" (m.h) : "memory");
VMGEXIT();
asm volatile("rdmsr" : "=a" (m.l), "=d" (m.h) : "c" (msr_idx));

if (GHCB_RESP_CODE(m.q) != GHCB_MSR_CPUID_RESP)
return -EIO;

*reg = m.h;

return 0;
}
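
For context — not part of this diff — the GHCB_CPUID_REQ()/GHCB_RESP_CODE() macros used above come from arch/x86/include/asm/sev-common.h. The MSR protocol puts the CPUID function in GHCBData[63:32] and the requested register index in bits 31:30, and the hypervisor returns the register value in the upper 32 bits of the response, which is why the function reads m.h. The definitions look roughly like this:

#define GHCB_MSR_CPUID_REQ	0x004
#define GHCB_CPUID_REQ(fn, reg_idx)			\
	(GHCB_MSR_CPUID_REQ |				\
	 (((unsigned long)(reg_idx) & 0x3) << 30) |	\
	 ((unsigned long)(fn) << 32))

#define GHCB_MSR_CPUID_RESP	0x005
#define GHCB_RESP_CODE(v)	((v) & 0xfff)	/* low 12 bits: response code */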

static bool __should_do_clflush(void)
{
u32 eax, ebx, ecx, edx;
int ret;

/* Bail out unless this is a memory-encrypted guest (Hygon CSV or AMD SEV) */
if (!sme_get_me_mask() ||
!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED))
return false;

/* Read the CPUID vendor info; if it cannot be retrieved, return false */
eax = 0x0;
if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ES_ENABLED)) {
native_cpuid(&eax, &ebx, &ecx, &edx);
} else {
/*
* A Hygon CSV2 or AMD SEV-ES guest must use the GHCB MSR
* protocol to retrieve the CPU vendor info.
*/
ret = __cpuid_msr_protocol(eax, GHCB_CPUID_REQ_EBX, &ebx);
ret = ret ? : __cpuid_msr_protocol(eax, GHCB_CPUID_REQ_ECX, &ecx);
ret = ret ? : __cpuid_msr_protocol(eax, GHCB_CPUID_REQ_EDX, &edx);
if (ret)
return false;
}

/* Check if this is a Hygon CSV guest */
#define STRING_Hygo 0x6f677948
#define STRING_uine 0x656e6975
#define STRING_nGen 0x6e65476e

if (ebx != STRING_Hygo || ecx != STRING_uine || edx != STRING_nGen)
return false;

return true;
}
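
The three magic constants are just the CPUID leaf 0 vendor string "HygonGenuine" read as little-endian dwords (CPUID returns it in EBX, EDX, ECX order). A standalone userspace sketch — a hypothetical test, not part of the diff — to verify the packing:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int ebx = 0x6f677948;	/* "Hygo" */
	unsigned int edx = 0x6e65476e;	/* "nGen" */
	unsigned int ecx = 0x656e6975;	/* "uine" */
	char vendor[13];

	/* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	printf("%s\n", vendor);	/* prints "HygonGenuine" on little-endian */
	return 0;
}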

void __ref early_clflush_bss_decrypted_section(void)
{
unsigned long vaddr, vaddr_end;
char *cl, *start, *end;

/* Only the BSP may flush these caches, and only during early boot */
if (bsp_flush_bss_decrypted_section_handled)
return;

/* Proceed only while still running on the early page tables */
if (read_cr3_pa() != __pa_nodebug(early_top_pgt))
return;

/* Only a Hygon CSV guest should do the clflush */
if (!__should_do_clflush())
goto handled;

/*
* The memory region of the .bss..decrypted section may have been
* mapped encrypted during an earlier boot stage. If the stale
* cache lines created back then are not flushed before Linux
* accesses that region, they will eventually be written back and
* corrupt the memory. So flush the caches through the encrypted
* mapping before accessing the .bss..decrypted section.
*
* __startup_64() has already set up the encrypted mapping for the
* .bss..decrypted section; use that mapping here.
*/
vaddr = (unsigned long)__start_bss_decrypted -
__START_KERNEL_map + phys_base;
vaddr_end = (unsigned long)__end_bss_decrypted -
__START_KERNEL_map + phys_base;

/* Hardcode the cache-line size to 64 bytes at this early stage. */
start = (char *)(vaddr & ~63);
end = (char *)((vaddr_end + 63) & ~63);

asm volatile("mfence" : : : "memory");
for (cl = start; cl != end; cl += 64)
clflush(cl);
asm volatile("mfence" : : : "memory");

handled:
bsp_flush_bss_decrypted_section_handled = true;
}
#endif
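
A quick worked example of the 64-byte rounding above, with hypothetical addresses (not part of the diff):

#include <stdio.h>

int main(void)
{
	unsigned long vaddr = 0x1010, vaddr_end = 0x10f0;
	unsigned long start = vaddr & ~63UL;		/* 0x1000: round down */
	unsigned long end = (vaddr_end + 63) & ~63UL;	/* 0x1100: round up */

	/* The kernel loop above would clflush each of these lines */
	for (unsigned long cl = start; cl != end; cl += 64)
		printf("clflush 0x%lx\n", cl);	/* 0x1000, 0x1040, 0x1080, 0x10c0 */
	return 0;
}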

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
10 changes: 10 additions & 0 deletions arch/x86/kernel/head_64.S
@@ -375,6 +375,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
shrq $32, %rdx
wrmsr

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
* Flush any stale cache lines of the .bss..decrypted section that
* were created during an earlier boot stage.
*/
call early_clflush_bss_decrypted_section
#endif

/* Setup and Load IDT */
call early_setup_idt

@@ -511,6 +519,8 @@ SYM_CODE_END(vc_boot_ghcb)
SYM_DATA(initial_code, .quad x86_64_start_kernel)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
SYM_DATA(bsp_flush_bss_decrypted_section_handled, .byte 0x0)
.balign 8
#endif

SYM_DATA(trampoline_lock, .quad 0);
13 changes: 13 additions & 0 deletions arch/x86/kernel/sev.c
@@ -33,6 +33,7 @@
#include <asm/insn-eval.h>
#include <asm/fpu/xcr.h>
#include <asm/processor.h>
#include <asm/processor-hygon.h>
#include <asm/realmode.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -1852,6 +1853,15 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co
struct ghcb *ghcb;
bool ret = true;

/*
* Make sure the code between __sev_get_ghcb() and __sev_put_ghcb()
* runs in atomic context. If the #VC came from kernel mode it already
* does; if it came from user mode, atomic context must be entered
* manually. Adding HARDIRQ_OFFSET to the preempt count makes
* in_interrupt() true and disables preemption.
*/
if (is_x86_vendor_hygon() && !in_nmi())
__preempt_count_add(HARDIRQ_OFFSET);

ghcb = __sev_get_ghcb(&state);

vc_ghcb_invalidate(ghcb);
@@ -1862,6 +1872,9 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co

__sev_put_ghcb(&state);

if (is_x86_vendor_hygon() && !in_nmi())
__preempt_count_sub(HARDIRQ_OFFSET);

/* Done - now check the result */
switch (result) {
case ES_OK:
29 changes: 24 additions & 5 deletions arch/x86/kvm/svm/sev.c
@@ -159,6 +159,13 @@ static int sev_asid_new(struct kvm_sev_info *sev)
bool retry = true;
int ret;

/*
* No matter what min_sev_asid is, all ASIDs in the range
* [1, max_sev_asid] can be used for CSV2 guests on Hygon CPUs.
*/
if (is_x86_vendor_hygon())
max_asid = max_sev_asid;

if (min_asid > max_asid)
return -ENOTTY;

@@ -2308,11 +2315,19 @@ void __init sev_hardware_setup(void)
goto out;
}

-/* Has the system been allocated ASIDs for SEV-ES? */
-if (min_sev_asid == 1)
-	goto out;
+if (is_x86_vendor_hygon()) {
+	/*
+	 * The ASIDs from 1 to max_sev_asid are available for Hygon
+	 * CSV2 guests.
+	 */
+	sev_es_asid_count = max_sev_asid;
+} else {
+	/* Has the system been allocated ASIDs for SEV-ES? */
+	if (min_sev_asid == 1)
+		goto out;

-	sev_es_asid_count = min_sev_asid - 1;
+	sev_es_asid_count = min_sev_asid - 1;
+}
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
sev_es_supported = true;

@@ -2328,7 +2343,8 @@ void __init sev_hardware_setup(void)
pr_info("%s %s (ASIDs %u - %u)\n",
is_x86_vendor_hygon() ? "CSV2" : "SEV-ES",
sev_es_supported ? "enabled" : "disabled",
-min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
+is_x86_vendor_hygon() ? 1 : (min_sev_asid > 1 ? 1 : 0),
+is_x86_vendor_hygon() ? max_sev_asid : min_sev_asid - 1);

sev_enabled = sev_supported;
sev_es_enabled = sev_es_supported;
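
For concreteness, with hypothetical firmware values max_sev_asid = 509 and min_sev_asid = 64: the stock AMD path reports "SEV-ES enabled (ASIDs 1 - 63)" with sev_es_asid_count = 63, while the Hygon path reports "CSV2 enabled (ASIDs 1 - 509)" with sev_es_asid_count = 509, since CSV2 guests may use any ASID up to max_sev_asid.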
@@ -2451,6 +2467,9 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)

__free_page(virt_to_page(svm->sev_es.vmsa));

if (svm->sev_es.ghcb)
kvm_vcpu_unmap(vcpu, &svm->sev_es.ghcb_map, false);

if (svm->sev_es.ghcb_sa_free)
kvfree(svm->sev_es.ghcb_sa);
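
(The added kvm_vcpu_unmap() call releases the guest GHCB mapping if the vCPU is torn down while a GHCB is still mapped, presumably to avoid leaking the mapping.)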

22 changes: 11 additions & 11 deletions arch/x86/kvm/x86.c
@@ -1709,22 +1709,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
struct kvm_msr_entry msr;
int r;

+/* Unconditionally clear the output for simplicity */
+msr.data = 0;
msr.index = index;
r = kvm_get_msr_feature(&msr);

-if (r == KVM_MSR_RET_INVALID) {
-	/* Unconditionally clear the output for simplicity */
-	*data = 0;
-	if (kvm_msr_ignored_check(index, 0, false))
-		r = 0;
-}
-
-if (r)
-	return r;
+if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
+	r = 0;

*data = msr.data;

-return 0;
+return r;
}
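
(Net effect of the refactor: msr.data is zeroed up front, so *data always ends up holding msr.data — still zero in the ignored-invalid-MSR case — and r is returned directly instead of through two exit paths.)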

static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
@@ -9855,8 +9850,13 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
{
u64 ret = vcpu->run->hypercall.ret;

-if (!is_64_bit_mode(vcpu))
+/* Use is_64_bit_hypercall() instead of is_64_bit_mode() for Hygon CPUs */
+if (is_x86_vendor_hygon()) {
+	if (!is_64_bit_hypercall(vcpu))
+		ret = (u32)ret;
+} else if (!is_64_bit_mode(vcpu)) {
	ret = (u32)ret;
+}
kvm_rax_write(vcpu, ret);
++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
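
For reference, is_64_bit_hypercall() (kvm/x86.h) differs from is_64_bit_mode() in that it assumes 64-bit mode for protected guests, whose segment state cannot be inspected from the host — exactly the situation of a CSV2/SEV-ES guest. Roughly, quoted from upstream (not part of this diff):

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * Protected guests don't expose CS, so assume the hypercall was
	 * made from 64-bit mode rather than reading the segment state.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}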