diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 0b8d9dd42b72a..45230e8b7a421 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5236,13 +5236,22 @@ static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 struct kvm_vpsp vpsp = { .kvm = kvm, .write_guest = kvm_write_guest, - .read_guest = kvm_read_guest + .read_guest = kvm_read_guest, + .gfn_to_pfn = gfn_to_pfn, }; + + if (sev_guest(kvm)) { + vpsp.vm_handle = to_kvm_svm(kvm)->sev_info.handle; + vpsp.is_csv_guest = 1; + } + switch (nr) { - case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3); + case KVM_HC_PSP_COPY_FORWARD_OP: + ret = kvm_pv_psp_copy_forward_op(&vpsp, a0, a1, a2); + break; + case KVM_HC_PSP_FORWARD_OP: + ret = kvm_pv_psp_forward_op(&vpsp, a0, a1, a2); break; - default: ret = -KVM_ENOSYS; break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ab74ffcd4513c..c6ef0c702b300 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9895,7 +9895,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) } if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && - !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) { + !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION + || nr == KVM_HC_PSP_OP_OBSOLETE + || nr == KVM_HC_PSP_COPY_FORWARD_OP + || nr == KVM_HC_PSP_FORWARD_OP))) { ret = -KVM_EPERM; goto out; } @@ -9932,7 +9935,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) kvm_sched_yield(vcpu, a0); ret = 0; break; - case KVM_HC_PSP_OP: + case KVM_HC_PSP_OP_OBSOLETE: + case KVM_HC_PSP_COPY_FORWARD_OP: + case KVM_HC_PSP_FORWARD_OP: ret = -KVM_ENOSYS; if (kvm_arch_hypercall) ret = kvm_arch_hypercall(vcpu->kvm, nr, a0, a1, a2, a3); diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index d070bfb4ebf22..02ebcd8ef247f 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -14,6 +14,7 @@ #include #include #include +#include #include @@ -663,12 +664,12 @@ static int vpsp_dequeue_cmd(int prio, int index, * Populate the command from the virtual machine to the queue to * support execution in ringbuffer mode */ -static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) +static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags) { struct csv_cmdptr_entry cmdptr = { }; int index = -1; - cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); + cmdptr.cmd_buf_ptr = phy_addr; cmdptr.cmd_id = cmd; cmdptr.cmd_flags = flags; @@ -968,12 +969,91 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, return rb_supported; } -int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret); +static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0; + phys_msb = phy_addr ? 
upper_32_bits(phy_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + return ret; +} + +int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret); + + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + /* * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, +int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { int ret = 0; @@ -996,8 +1076,7 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, /* dequeue command from queue*/ vpsp_dequeue_cmd(prio, index, &cmd); - ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, - (int *)psp_ret); + ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); psp_ret->status = VPSP_FINISH; vpsp_psp_mutex_unlock(); if (unlikely(ret)) { @@ -1040,7 +1119,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result); * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -1055,10 +1134,10 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) (struct vpsp_cmd *)&cmd); if (rb_supported) { /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); + index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0); if (unlikely(index < 0)) { /* do mailbox command if queuing failed*/ - ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; @@ -1074,14 +1153,14 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) } /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); + ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); if (unlikely(ret)) { 
pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); goto end; } } else { /* mailbox mode */ - ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 5c56c6fc5a31e..c150aa163a7a2 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -30,6 +30,8 @@ enum HYGON_PSP_OPCODE { HYGON_PSP_MUTEX_ENABLE = 1, HYGON_PSP_MUTEX_DISABLE, HYGON_VPSP_CTRL_OPT, + HYGON_PSP_OP_PIN_USER_PAGE, + HYGON_PSP_OP_UNPIN_USER_PAGE, HYGON_PSP_OPCODE_MAX_NR, }; @@ -38,16 +40,26 @@ enum VPSP_DEV_CTRL_OPCODE { VPSP_OP_VID_DEL, VPSP_OP_SET_DEFAULT_VID_PERMISSION, VPSP_OP_GET_DEFAULT_VID_PERMISSION, + VPSP_OP_SET_GPA, }; struct vpsp_dev_ctrl { unsigned char op; + /** + * To be compatible with old user mode, + * struct vpsp_dev_ctrl must be kept at 132 bytes. + */ + unsigned char resv[3]; union { unsigned int vid; // Set or check the permissions for the default VID unsigned int def_vid_perm; + struct { + u64 gpa_start; + u64 gpa_end; + } gpa; unsigned char reserved[128]; - } data; + } __packed data; }; uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) @@ -160,19 +172,15 @@ DEFINE_RWLOCK(vpsp_rwlock); #define VPSP_VID_MAX_ENTRIES 2048 #define VPSP_VID_NUM_MAX 64 -struct vpsp_vid_entry { - uint32_t vid; - pid_t pid; -}; -static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; static uint32_t g_vpsp_vid_num; static int compare_vid_entries(const void *a, const void *b) { - return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; + return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; } static void swap_vid_entries(void *a, void *b, int size) { - struct vpsp_vid_entry entry; + struct vpsp_context entry; memcpy(&entry, a, size); memcpy(a, b, size); @@ -197,43 +205,41 @@ int vpsp_get_default_vid_permission(void) EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); /** - * When the virtual machine executes the 'tkm' command, - * it needs to retrieve the corresponding 'vid' - * by performing a binary search using 'kvm->userspace_pid'. + * get a vpsp context from pid */ -int vpsp_get_vid(uint32_t *vid, pid_t pid) +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { - struct vpsp_vid_entry new_entry = {.pid = pid}; - struct vpsp_vid_entry *existing_entry = NULL; + struct vpsp_context new_entry = {.pid = pid}; + struct vpsp_context *existing_entry = NULL; read_lock(&vpsp_rwlock); - existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, - sizeof(struct vpsp_vid_entry), compare_vid_entries); + existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, + sizeof(struct vpsp_context), compare_vid_entries); read_unlock(&vpsp_rwlock); if (!existing_entry) return -ENOENT; - if (vid) { - *vid = existing_entry->vid; - pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); - } + + if (ctx) + *ctx = existing_entry; + return 0; } -EXPORT_SYMBOL_GPL(vpsp_get_vid); +EXPORT_SYMBOL_GPL(vpsp_get_context); /** * Upon qemu startup, this section checks whether * the '-device psp,vid' parameter is specified. * If set, it utilizes the 'vpsp_add_vid' function - * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. * The insertion is done in ascending order of 'pid'. 
*/ static int vpsp_add_vid(uint32_t vid) { pid_t cur_pid = task_pid_nr(current); - struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; - if (vpsp_get_vid(NULL, cur_pid) == 0) + if (vpsp_get_context(NULL, cur_pid) == 0) return -EEXIST; if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) return -ENOMEM; @@ -241,8 +247,8 @@ static int vpsp_add_vid(uint32_t vid) return -EINVAL; write_lock(&vpsp_rwlock); - memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); - sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); + sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), compare_vid_entries, swap_vid_entries); pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); write_unlock(&vpsp_rwlock); @@ -261,12 +267,12 @@ static int vpsp_del_vid(void) write_lock(&vpsp_rwlock); for (i = 0; i < g_vpsp_vid_num; ++i) { - if (g_vpsp_vid_array[i].pid == cur_pid) { + if (g_vpsp_context_array[i].pid == cur_pid) { --g_vpsp_vid_num; pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", - g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); - memmove(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], - sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); + memmove(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], + sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); ret = 0; goto end; } @@ -277,6 +283,85 @@ static int vpsp_del_vid(void) return ret; } +static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_context *ctx = NULL; + + vpsp_get_context(&ctx, cur_pid); + if (!ctx) { + pr_err("PSP: %s get vpsp_context failed for pid %d\n", __func__, cur_pid); + return -ENOENT; + } + + ctx->gpa_start = gpa_start; + ctx->gpa_end = gpa_end; + pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", + gpa_start, gpa_end, cur_pid); + return 0; } + +/** + * Try to pin a page + * + * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE + */ +static int psp_pin_user_page(u64 vaddr) +{ + struct page *page; + long npinned = 0; + int ref_count = 0; + + // vaddr must be aligned to PAGE_SIZE + if (vaddr & (PAGE_SIZE - 1)) { + pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE); + return -EFAULT; + } + + npinned = pin_user_pages_fast(vaddr, 1, FOLL_WRITE, &page); + if (npinned != 1) { + pr_err("PSP: pin_user_pages_fast fail\n"); + return -ENOMEM; + } + + ref_count = page_ref_count(page); + pr_debug("pin user page with address %llx, page ref_count %d\n", vaddr, ref_count); + return 0; } + +/** + * Try to unpin a page + * + * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE + */ +static int psp_unpin_user_page(u64 vaddr) +{ + struct page *page; + long npinned = 0; + int ref_count = 0; + + // vaddr must be aligned to PAGE_SIZE + if (vaddr & (PAGE_SIZE - 1)) { + pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE); + return -EFAULT; + } + + // increases the page reference count by 1 + npinned = get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page); + if (npinned != 1) { + pr_err("PSP: get_user_pages_fast fail\n"); + return -ENOMEM; + } + + // decreases the page reference count by 2, dropping the earlier pin + put_page(page); + put_page(page); + + ref_count = page_ref_count(page); + pr_debug("unpin user page with address 
%llx, page ref_count %d\n", vaddr, ref_count); + return 0; +} + static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) { int ret = 0; @@ -299,6 +384,10 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); break; + case VPSP_OP_SET_GPA: + ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); + break; + default: ret = -EINVAL; break; @@ -355,6 +444,14 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) return -EFAULT; break; + case HYGON_PSP_OP_PIN_USER_PAGE: + ret = psp_pin_user_page((u64)arg); + break; + + case HYGON_PSP_OP_UNPIN_USER_PAGE: + ret = psp_unpin_user_page((u64)arg); + break; + default: pr_info("%s: invalid ioctl number: %d\n", __func__, opcode); return -EINVAL; @@ -498,100 +595,6 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } -int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - phys_addr_t phys_addr; - unsigned int phys_lsb, phys_msb; - unsigned int reg, ret = 0; - - if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (*hygon_psp_hooks.psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - if (data && WARN_ON_ONCE(!virt_addr_valid(data))) - return -EINVAL; - - /* Get the physical address of the command buffer */ - phys_addr = PUT_PSP_VID(__psp_pa(data), vid); - phys_lsb = data ? lower_32_bits(phys_addr) : 0; - phys_msb = data ? upper_32_bits(phys_addr) : 0; - - dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); - - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - sev->int_rcvd = 0; - - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; - iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); - *hygon_psp_hooks.psp_dead = true; - - return ret; - } - - *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; - - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - - if (FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; - } - - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, - hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); - - return ret; -} - -int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) -{ - int rc; - int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) { - return -EBUSY; - } - } else { - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); - } - - rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); - - if (mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); - - return rc; -} - int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; diff --git 
a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index 13208fe2c4b3d..df62dab035b89 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -13,545 +13,464 @@ #include #include #include +#include #ifdef pr_fmt #undef pr_fmt #endif #define pr_fmt(fmt) "vpsp: " fmt +#define VTKM_VM_BIND 0x904 /* - * The file mainly implements the base execution - * logic of virtual PSP in kernel mode, which mainly includes: - * (1) Obtain the VM command and preprocess the pointer - * mapping table information in the command buffer - * (2) The command that has been converted will interact - * with the channel of the psp through the driver and - * try to obtain the execution result - * (3) The executed command data is recovered according to - * the multilevel pointer of the mapping table, and then returned to the VM + * The file mainly implements the base execution logic of virtual PSP in kernel mode, + * which mainly includes: + * (1) Preprocess the guest data in the host kernel + * (2) The converted command interacts with the channel of the + * psp through the driver and tries to obtain the execution result + * (3) The executed command data is recovered and then returned to the VM * * The primary implementation logic of virtual PSP in kernel mode * call trace: - * guest command(vmmcall) - * | - * | |-> kvm_pv_psp_cmd_pre_op - * | | - * | | -> guest_addr_map_table_op - * | | - * | | -> guest_multiple_level_gpa_replace + * guest command(vmmcall, KVM_HC_PSP_COPY_FORWARD_OP) * | - * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver - * | - * | - * |-> kvm_pv_psp_cmd_post_op + * kvm_pv_psp_copy_forward_op-> | -> kvm_pv_psp_cmd_pre_op + * | + * | -> vpsp_try_do_cmd/vpsp_try_get_result + * | |<=> psp device driver * | - * | -> guest_addr_map_table_op - * | - * | -> guest_multiple_level_gpa_restore + * | + * |-> kvm_pv_psp_cmd_post_op + * + * guest command(vmmcall, KVM_HC_PSP_FORWARD_OP) + * | + * kvm_pv_psp_forward_op-> |-> vpsp_try_do_cmd/vpsp_try_get_result + * |<=> psp device driver */ -#define TKM_CMD_ID_MIN 0x120 -#define TKM_CMD_ID_MAX 0x12f - struct psp_cmdresp_head { uint32_t buf_size; uint32_t cmdresp_size; uint32_t cmdresp_code; } __packed; -/** - * struct map_tbl - multilevel pointer address mapping table - * - * @parent_pa: parent address block's physics address - * @offset: offset in parent address block - * @size: submemory size - * @align: submemory align size, hva need to keep size alignment in kernel - * @hva: submemory copy block in kernel virtual address - */ -struct map_tbl { - uint64_t parent_pa; - uint32_t offset; - uint32_t size; - uint32_t align; - uint64_t hva; -} __packed; -struct addr_map_tbls { - uint32_t tbl_nums; - struct map_tbl tbl[]; -} __packed; -/* gpa and hva conversion maintenance table for internal use */ -struct gpa2hva_t { - void *hva; - gpa_t gpa; -}; -struct gpa2hva_tbls { - uint32_t max_nums; - uint32_t tbl_nums; - struct gpa2hva_t tbl[]; -}; /* save command data for restoring later */ struct vpsp_hbuf_wrapper { void *data; uint32_t data_size; - struct addr_map_tbls *map_tbls; - struct gpa2hva_tbls *g2h_tbls; }; /* Virtual PSP host memory information maintenance, used in ringbuffer mode */ struct vpsp_hbuf_wrapper g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; -void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) +static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size) { - int 
i; - - pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums); - for (i = 0; i < tbls->tbl_nums; i++) { - pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx", - i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset, - tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva); - } - pr_info("\n"); -} + if (!vpsp_ctx || !addr) + return -EFAULT; -void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) -{ - int i; - - pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums, - tbls->max_nums); - for (i = 0; i < tbls->tbl_nums; i++) - pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i, - (uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa); - pr_info("\n"); + if (addr >= vpsp_ctx->gpa_start && (addr + size) <= vpsp_ctx->gpa_end) + return 0; + return -EFAULT; } -static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa) +static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, + void *data, uint32_t size) { - uint32_t fill_idx = tbls->tbl_nums; - - if (fill_idx >= tbls->max_nums) + if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) != + ((uintptr_t)data & ~PSP_2MB_MASK)) { + pr_err("data %llx, size %d crosses a 2MB boundary\n", (u64)data, size); return -EFAULT; + } - tbls->tbl[fill_idx].hva = hva; - tbls->tbl[fill_idx].gpa = gpa; - tbls->tbl_nums = fill_idx + 1; + if (vpsp_ctx) + return check_gpa_range(vpsp_ctx, (gpa_t)data, size); return 0; } -static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva) +/** + * Copy the guest data to the host kernel buffer + * and record the host buffer address in 'hbuf'. + * This 'hbuf' is used to restore context information + * during asynchronous processing. + */ +static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) { - int i; + int ret = 0; + void *data = NULL; + struct psp_cmdresp_head psp_head; + uint32_t data_size; - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].hva == hva) - g2h->tbl[i].hva = NULL; - } -} + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; -static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa) -{ - int i; + data_size = psp_head.buf_size; + if (check_psp_mem_range(NULL, (void *)data_gpa, data_size)) + return -EFAULT; - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].gpa == gpa) - return (void *)g2h->tbl[i].hva; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; } - return NULL; + hbuf->data = data; + hbuf->data_size = data_size; + +end: + return ret; } -static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) +static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) { - int i; + int ret = 0; - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].hva == hva) - return g2h->tbl[i].gpa; + /* restore cmdresp's buffer from context */ + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, + hbuf->data_size))) { + pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", + __func__); + ret = -EFAULT; + goto end; } +end: + kfree(hbuf->data); + memset(hbuf, 0, sizeof(*hbuf)); + return ret; +} +static int cmd_type_is_tkm(int cmd) +{ + if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) + return 1; return 0; } -/* - * The virtual machine multilevel pointer command buffer handles the - * execution entity, synchronizes the data 
in the original gpa to the - * newly allocated hva(host virtual address) and updates the mapping - * relationship in the parent memory - */ -static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp, - struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +static int cmd_type_is_allowed(int cmd) +{ + if (cmd >= TKM_PSP_CMDID_OFFSET && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + +struct psp_cmdresp_vtkm_vm_bind { + struct psp_cmdresp_head head; + uint16_t vid; + uint32_t vm_handle; + uint8_t reserved[46]; +} __packed; + +static int kvm_bind_vtkm(uint32_t vm_handle, uint32_t cmd_id, uint32_t vid, uint32_t *pret) { int ret = 0; - uint32_t sub_block_size; - uint64_t sub_paddr; - void *parent_kva = NULL; - - /* kmalloc memory for child block */ - sub_block_size = max(tbl->size, tbl->align); - tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL); - if (!tbl->hva) + struct psp_cmdresp_vtkm_vm_bind *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) return -ENOMEM; - /* get child gpa from parent gpa */ - if (unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset, - &sub_paddr, sizeof(sub_paddr)))) { - pr_err("[%s]: kvm_read_guest for parent gpa failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } + data->head.buf_size = sizeof(*data); + data->head.cmdresp_size = sizeof(*data); + data->head.cmdresp_code = VTKM_VM_BIND; + data->vid = vid; + data->vm_handle = vm_handle; - /* copy child block data from gpa to hva */ - if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva, - tbl->size))) { - pr_err("[%s]: kvm_read_guest for sub_data failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } + ret = psp_do_cmd(cmd_id, data, pret); + if (ret == -EIO) + ret = 0; - /* get hva from gpa */ - parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa); - if (unlikely(!parent_kva)) { - pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } + kfree(data); + return ret; +} - /* replace pa of hva from gpa */ - *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva); +static unsigned long vpsp_get_me_mask(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + +#define AMD_SME_BIT BIT(0) +#define AMD_SEV_BIT BIT(1) + /* + * Check for the SME/SEV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - Secure Memory Encryption support + * - Bit 1 - Secure Encrypted Virtualization support + * CPUID Fn8000_001F[EBX] + * - Bits 5:0 - Pagetable bit position used to indicate encryption + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SEV or SME is supported */ + if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT))) + return 0; + + me_mask = 1UL << (ebx & 0x3f); + return me_mask; +} - /* fill in gpa and hva to map table for restoring later */ - if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) { - pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n", - __func__); - ret = -EFAULT; - goto e_free; +static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) +{ + phys_addr_t hpa = 0; + unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT); + unsigned long me_mask = sme_get_me_mask(); + struct page *page; + + if (me_mask == 0 && vpsp->is_csv_guest) + me_mask = vpsp_get_me_mask(); + + if (!is_error_pfn(pfn)) + hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | me_mask; + else { + pr_err("[%s] pfn: %lx is invalid, gpa %lx", + __func__, pfn, data_gpa); + return 0; } - return ret; + /* + * Using gfn_to_pfn causes 
the refcount to increment + * atomically by one, which needs to be released. + */ + page = pfn_to_page(pfn); + if (PageCompound(page)) + page = compound_head(page); + + put_page(page); + + pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa); + return hpa; -e_free: - kfree((const void *)tbl->hva); - return ret; } -/* The virtual machine multi-level pointer command memory handles the - * execution entity, synchronizes the data in the hva(host virtual - * address) back to the memory corresponding to the gpa, and restores - * the mapping relationship in the original parent memory - */ -static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp, - struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, + uint64_t data, uint32_t cmd) { - int ret = 0; - gpa_t sub_gpa; - void *parent_hva = NULL; + int ret; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + struct psp_cmdresp_head psp_head; - /* get gpa from hva */ - sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva); - if (unlikely(!sub_gpa)) { - pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n", - __func__); - ret = -EFAULT; - goto end; + if (!cmd_type_is_allowed(vcmd->cmd_id)) { + pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id); + return -EINVAL; } - /* copy child block data from hva to gpa */ - if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva, - tbl->size))) { - pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - /* get parent hva from parent gpa */ - parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa); - if (unlikely(!parent_hva)) { - pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", - __func__); - ret = -EFAULT; - goto end; - } + if (vpsp->is_csv_guest) { + /** + * If the gpa address range exists, + * it means there must be a valid vid + */ + if (!vpsp_ctx || !vpsp_ctx->gpa_start || !vpsp_ctx->gpa_end) { + pr_err("[%s]: no gpa range or vid set for the csv guest\n", __func__); + return -EPERM; + } - /* restore gpa from pa of hva in parent block */ - *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa; + ret = check_psp_mem_range(vpsp_ctx, (void *)data, 0); + if (ret) + return -EFAULT; + } else { + if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id) + && !vpsp_get_default_vid_permission()) { + pr_err("[%s]: tkm command not allowed without a vid\n", __func__); + return -EPERM; + } - /* free child block memory */ - clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva); - kfree((const void *)tbl->hva); - tbl->hva = 0; + // 'data' is a gpa address + if (unlikely(vpsp->read_guest(vpsp->kvm, data, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; -end: - return ret; + ret = check_psp_mem_range(vpsp_ctx, (void *)data, psp_head.buf_size); + if (ret) + return -EFAULT; + } + return 0; } -/* - * The virtual machine multilevel pointer command memory processing - * executes upper-layer abstract interfaces, including replacing and - * restoring two sub-processing functions - */ -static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h, - struct addr_map_tbls *map_tbls, int op) +static int +check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp, + struct vpsp_context *vpsp_ctx, + uint64_t data, uint32_t cmd) { int ret = 0; - int i; - uint64_t *sub_paddr_ptr; - - if (op) { - for (i = map_tbls->tbl_nums - 1; i >= 0; i--) { - /* check if the gpa of root points to itself */ - if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { - sub_paddr_ptr = 
(uint64_t *)((uint8_t *)g2h->tbl[0].hva - + map_tbls->tbl[i].offset); - /* if the child paddr is equal to the parent paddr */ - if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) { - *sub_paddr_ptr = g2h->tbl[0].gpa; - continue; - } - } - - /* restore new pa of kva with the gpa from guest */ - if (unlikely(guest_multiple_level_gpa_restore(vpsp, - &map_tbls->tbl[i], g2h))) { - pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", - __func__); - ret = -EFAULT; - goto end; - } - } - } else { - for (i = 0; i < map_tbls->tbl_nums; i++) { - /* check if the gpa of root points to itself */ - if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { - sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva - + map_tbls->tbl[i].offset); - /* if the child paddr is equal to the parent paddr */ - if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) { - *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva); - map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva; - continue; - } - } + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; - /* check if parent_pa is valid */ - if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) { - pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n", - __func__, i, map_tbls->tbl[i].parent_pa); - ret = -EFAULT; - goto end; - } + if (!cmd_type_is_allowed(vcmd->cmd_id)) { + pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id); + return -EINVAL; + } - /* replace the gpa from guest with the new pa of kva */ - if (unlikely(guest_multiple_level_gpa_replace(vpsp, - &map_tbls->tbl[i], g2h))) { - pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", - __func__); - ret = -EFAULT; - goto end; - } + if (vpsp->is_csv_guest) { + pr_err("[%s]: not supported on a csv guest\n", __func__); + ret = -EPERM; + } else { + if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id) + && !vpsp_get_default_vid_permission()) { + pr_err("[%s]: tkm command not allowed without a vid\n", __func__); + ret = -EPERM; } } - -end: return ret; } -static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls - *map_tbl, void *data) +static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, + uint32_t cmd, uint32_t *psp_ret) { - int i; + int ret; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; - if (g2h) { - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) { - kfree(g2h->tbl[i].hva); - g2h->tbl[i].hva = NULL; - } + if (vpsp_ctx && !vpsp_ctx->vm_is_bound && vpsp->is_csv_guest) { + ret = kvm_bind_vtkm(vpsp->vm_handle, vcmd->cmd_id, + vpsp_ctx->vid, psp_ret); + if (ret || *psp_ret) { + pr_err("[%s] kvm bind vtkm failed with ret: %d, pspret: %d\n", + __func__, ret, *psp_ret); + return ret; } - kfree(g2h); + vpsp_ctx->vm_is_bound = 1; } - - kfree(map_tbl); - kfree(data); + return 0; } -/* - * Obtain the VM command and preprocess the pointer mapping table - * information in the command buffer, the processed data will be - * used to interact with the psp device +/** + * @brief Directly convert the gpa address into hpa and forward it to PSP. + * It is another form of kvm_pv_psp_copy_forward_op, mainly used for csv VMs. + * + * @param vpsp points to kvm related data + * @param cmd psp cmd id, bit 31 indicates queue priority + * @param data_gpa guest physical address of input data + * @param psp_ret indicates the asynchronous context information + * + * Since the csv guest memory cannot be read or written directly, + * the asynchronous context information is shared through psp_ret and the return value. 
*/ -static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) +int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret) { - int ret = 0; - void *data = NULL; - struct psp_cmdresp_head psp_head; - uint32_t data_size; - struct addr_map_tbls map_head, *map_tbls = NULL; - uint32_t map_tbl_size; - struct gpa2hva_tbls *g2h = NULL; - uint32_t g2h_tbl_size; - - if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, - sizeof(struct psp_cmdresp_head)))) - return -EFAULT; + int ret; + uint64_t data_hpa; + uint32_t index = 0, vid = 0; + struct vpsp_ret psp_async = {0}; + struct vpsp_context *vpsp_ctx = NULL; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + phys_addr_t hpa; - data_size = psp_head.buf_size; - data = kzalloc(data_size, GFP_KERNEL); - if (!data) - return -ENOMEM; + vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); - if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { - ret = -EFAULT; + ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + if (unlikely(ret)) { + pr_err("forward operation not allowed\n"); goto end; } - if (table_gpa) { - /* parse address map table from guest */ - if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head, - sizeof(struct addr_map_tbls)))) { - pr_err("[%s]: kvm_read_guest for map_head failed\n", - __func__); - ret = -EFAULT; - goto end; - } + ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async); + if (unlikely(ret || *(uint32_t *)&psp_async)) { + pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n", + ret, *(uint32_t *)&psp_async); + goto end; + } - map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums - * sizeof(struct map_tbl); - map_tbls = kzalloc(map_tbl_size, GFP_KERNEL); - if (!map_tbls) { - ret = -ENOMEM; - goto end; - } + if (vpsp_ctx) + vid = vpsp_ctx->vid; - if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls, - map_tbl_size))) { - pr_err("[%s]: kvm_read_guest for map_tbls failed\n", - __func__); - ret = -EFAULT; - goto end; - } + *((uint32_t *)&psp_async) = psp_ret; - /* init for gpa2hva table*/ - g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums - + 1) * sizeof(struct gpa2hva_t); - g2h = kzalloc(g2h_tbl_size, GFP_KERNEL); - if (!g2h) { - ret = -ENOMEM; - goto end; - } - g2h->max_nums = map_head.tbl_nums + 1; + hpa = gpa_to_hpa(vpsp, data_gpa); + if (unlikely(!hpa)) { + ret = -EFAULT; + goto end; + } - /* fill the root parent address */ - if (gpa2hva_tbl_fill(g2h, data, data_gpa)) { - pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n", - __func__); - ret = -EFAULT; - goto end; - } + data_hpa = PUT_PSP_VID(hpa, vid); - if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) { - pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", - __func__); - ret = -EFAULT; + switch (psp_async.status) { + case VPSP_INIT: + /* try to send command to the device for execution */ + ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); goto end; } - } - - hbuf->data = data; - hbuf->data_size = data_size; - hbuf->map_tbls = map_tbls; - hbuf->g2h_tbls = g2h; - -end: - return ret; -} - -/* - * The executed command data is recovered according to the multilevel - * pointer of the mapping table when the command has finished - * interacting with the psp device - */ -static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t 
data_gpa, - struct vpsp_hbuf_wrapper *hbuf) -{ - int ret = 0; + break; - if (hbuf->map_tbls) { - if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls, - hbuf->map_tbls, 1)) { - pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", - __func__); - ret = -EFAULT; + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_async.index; + /* try to get the execution result from ringbuffer */ + ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); goto end; } - } + break; - /* restore cmdresp's buffer from context */ - if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, - hbuf->data_size))) { - pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", - __func__); - ret = -EFAULT; - goto end; + default: + pr_err("[%s]: invalid command status\n", __func__); + break; } end: - /* release memory and clear hbuf */ - kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data); - memset(hbuf, 0, sizeof(*hbuf)); - - return ret; -} - -static int cmd_type_is_tkm(int cmd) -{ - if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) - return 1; - return 0; + /** + * In order to indicate both system errors and PSP errors, + * the psp_async.pret field needs to be reused. + */ + psp_async.format = VPSP_RET_PSP_FORMAT; + if (ret) { + psp_async.format = VPSP_RET_SYS_FORMAT; + if (ret > 0) + ret = -ret; + psp_async.pret = (uint16_t)ret; + } + return *((int *)&psp_async); } +EXPORT_SYMBOL_GPL(kvm_pv_psp_forward_op); -/* - * The primary implementation interface of virtual PSP in kernel mode +/** + * @brief Copy data in gpa to host memory and send it to psp for processing. + * + * @param vpsp points to kvm related data + * @param cmd psp cmd id, bit 31 indicates queue priority + * @param data_gpa guest physical address of input data + * @param psp_ret_gpa guest physical address of psp_ret */ -int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa) +int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa) { int ret = 0; struct vpsp_ret psp_ret = {0}; struct vpsp_hbuf_wrapper hbuf = {0}; struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + struct vpsp_context *vpsp_ctx = NULL; + phys_addr_t data_paddr = 0; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; uint32_t index = 0; uint32_t vid = 0; - // only tkm cmd need vid - if (cmd_type_is_tkm(vcmd->cmd_id)) { - // check the permission to use the default vid when no vid is set - ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); - if (ret && !vpsp_get_default_vid_permission()) { - pr_err("[%s]: not allowed tkm command without vid\n", __func__); - return -EFAULT; - } + vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + + ret = check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + if (unlikely(ret)) { + pr_err("copy operation not allowed\n"); + return -EPERM; } + if (vpsp_ctx) + vid = vpsp_ctx->vid; + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) return -EFAULT; switch (psp_ret.status) { case VPSP_INIT: - /* multilevel pointer replace*/ - ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf); + /* copy data from guest */ + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", @@ -560,25 +479,22 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t 
psp_ret_ goto end; } + data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid); /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, - (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); ret = -EFAULT; goto end; } - switch (psp_ret.status) { - case VPSP_RUNNING: - /* backup host memory message for restoring later*/ + if (psp_ret.status == VPSP_RUNNING) { prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : CSV_COMMAND_PRIORITY_LOW; g_hbuf_wrap[prio][psp_ret.index] = hbuf; break; - case VPSP_FINISH: - /* restore multilevel pointer data */ + } else if (psp_ret.status == VPSP_FINISH) { ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", @@ -586,11 +502,6 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ ret = -EFAULT; goto end; } - break; - - default: - ret = -EFAULT; - break; } break; @@ -598,35 +509,31 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : CSV_COMMAND_PRIORITY_LOW; index = psp_ret.index; + data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid); /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data, - (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_get_result(prio, index, data_paddr, + (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); ret = -EFAULT; goto end; } - switch (psp_ret.status) { - case VPSP_RUNNING: - break; - - case VPSP_FINISH: - /* restore multilevel pointer data */ + if (psp_ret.status == VPSP_RUNNING) { + ret = 0; + goto end; + } else if (psp_ret.status == VPSP_FINISH) { + /* copy data to guest */ ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &g_hbuf_wrap[prio][index]); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); ret = -EFAULT; - goto end; } - break; - - default: - ret = -EFAULT; - break; + goto end; } + ret = -EFAULT; break; default: @@ -638,4 +545,5 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ /* return psp_ret to guest */ vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; -} EXPORT_SYMBOL_GPL(kvm_pv_psp_op); +} +EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 26b271ea81a5c..1888d9d725925 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -433,31 +433,54 @@ struct vpsp_cmd { * * @pret: the return code from device * @resv: reserved bits + * @format: indicates whether @pret is a unix error code (format is 1) or a psp error code (format is 0) * @index: used to distinguish the position of command in the ringbuffer * @status: indicates the current status of the related command */ struct vpsp_ret { u32 pret : 16; - u32 resv : 2; + u32 resv : 1; + u32 format : 1; u32 index : 12; u32 status : 2; }; +#define VPSP_RET_SYS_FORMAT 1 +#define VPSP_RET_PSP_FORMAT 0 struct kvm_vpsp { struct kvm *kvm; int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); + kvm_pfn_t (*gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); + u32 vm_handle; + u8 is_csv_guest; 
}; +#define PSP_2MB_MASK (2*1024*1024 - 1) +#define PSP_HUGEPAGE_2MB (2*1024*1024) +#define PSP_HUGEPAGE_NUM_MAX 128 +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f +#define TKM_PSP_CMDID TKM_CMD_ID_MIN +#define TKM_PSP_CMDID_OFFSET 0x128 #define PSP_VID_MASK 0xff #define PSP_VID_SHIFT 56 #define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) #define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) #define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) -#ifdef CONFIG_CRYPTO_DEV_SP_PSP +struct vpsp_context { + u32 vid; + pid_t pid; + u64 gpa_start; + u64 gpa_end; + + // `vm_is_bound` indicates whether the binding operation has been performed + u32 vm_is_bound; + u32 vm_handle; // only for csv +}; -int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret); +#ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -472,20 +495,20 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, - void *data, struct vpsp_ret *psp_ret); +int vpsp_try_get_result(uint8_t prio, uint32_t index, + phys_addr_t phy_addr, struct vpsp_ret *psp_ret); -int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret); -int vpsp_get_vid(uint32_t *vid, pid_t pid); +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid); int vpsp_get_default_vid_permission(void); -int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa); -#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa); -static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; } +int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret); +#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -498,22 +521,31 @@ static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } static inline int -vpsp_try_get_result(uint32_t vid, uint8_t prio, - uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +vpsp_try_get_result(uint8_t prio, + uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { return -ENODEV; } static inline int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } static inline int -vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; } +vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, + struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { return -ENODEV; } static inline int vpsp_get_default_vid_permission(void) { return -ENODEV; } static inline int -kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } +kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa) { return -ENODEV; } + +static inline int +kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); 
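
For reference, a minimal sketch of how a caller could decode the packed 32-bit word that kvm_pv_psp_forward_op() returns. This is illustrative only and not part of the patch; it assumes struct vpsp_ret and the VPSP_RUNNING/VPSP_FINISH status values are visible from <linux/psp-hygon.h>, and that a hypothetical local `raw` holds the hypercall return value.

static void vpsp_decode_ret_sketch(u32 raw)
{
	struct vpsp_ret r;

	/* reinterpret the raw return word as the packed bitfield */
	memcpy(&r, &raw, sizeof(r));

	if (r.format == VPSP_RET_SYS_FORMAT) {
		/* pret carries a negated errno, truncated to 16 bits */
		pr_err("vpsp: host error %d\n", (int)(s16)r.pret);
	} else if (r.status == VPSP_FINISH) {
		/* pret carries the PSP status code */
		pr_info("vpsp: psp status %u\n", r.pret);
	} else if (r.status == VPSP_RUNNING) {
		/* still queued at r.index: pass the whole word back as a2 of
		 * the next KVM_HC_PSP_FORWARD_OP call to poll for the result */
	}
}
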
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 86369b7a57339..944fe133ae3c1 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -31,7 +31,9 @@ #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ -#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */ +#define KVM_HC_PSP_OP_OBSOLETE 101 /* Specific to Hygon platform */ +#define KVM_HC_PSP_COPY_FORWARD_OP 102 /* Specific to Hygon platform */ +#define KVM_HC_PSP_FORWARD_OP 103 /* Specific to Hygon platform */ /* * hypercalls use architecture specific
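
To show how a guest reaches the new hypercall numbers, here is a hypothetical guest-side wrapper (a sketch, not part of this patch). It assumes a Linux guest where kvm_hypercall3() from <linux/kvm_para.h> is available; the a0..a2 argument layout follows kvm_hygon_arch_hypercall() above, and the wrapper names are invented.

#include <linux/kvm_para.h>

/* a0 = PSP command id (bit 31 selects the high-priority ring),
 * a1 = gpa of the command buffer,
 * a2 = gpa of a struct vpsp_ret that the host reads and writes back */
static long psp_copy_forward(u32 cmd, u64 data_gpa, u64 psp_ret_gpa)
{
	return kvm_hypercall3(KVM_HC_PSP_COPY_FORWARD_OP,
			      cmd, data_gpa, psp_ret_gpa);
}

/* For the direct-forward variant the host translates the gpa to an hpa
 * itself; a2 carries the previous vpsp_ret word when polling an
 * asynchronous command (0 on the first call). */
static long psp_forward(u32 cmd, u64 data_gpa, u32 prev_ret)
{
	return kvm_hypercall3(KVM_HC_PSP_FORWARD_OP, cmd, data_gpa, prev_ret);
}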