diff --git a/Documentation/virt/kvm/arm/hypercalls.rst b/Documentation/virt/kvm/arm/hypercalls.rst index 3e23084644ba2..67bd102cf43b1 100644 --- a/Documentation/virt/kvm/arm/hypercalls.rst +++ b/Documentation/virt/kvm/arm/hypercalls.rst @@ -127,6 +127,10 @@ The pseudo-firmware bitmap register are as follows: Bit-1: KVM_REG_ARM_VENDOR_HYP_BIT_PTP: The bit represents the Precision Time Protocol KVM service. + Bit-2: KVM_REG_ARM_VENDOR_HYP_BIT_IPIV: + The bit represents the ARM_SMCCC_VENDOR_PV_SGI_FEATURES and + ARM_SMCCC_VENDOR_PV_SGI_ENABLE function-ids. + Errors: ======= ============================================================= diff --git a/Documentation/virt/kvm/arm/pvsgi.rst b/Documentation/virt/kvm/arm/pvsgi.rst new file mode 100644 index 0000000000000..5f12a3aaccd2d --- /dev/null +++ b/Documentation/virt/kvm/arm/pvsgi.rst @@ -0,0 +1,33 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Paravirtualized SGI support for HiSilicon +========================================== + +KVM/arm64 provides some hypervisor service calls to support a paravirtualized +SGI(software generated interrupt) in HiSilicon Hip12 SoC. + +Some SMCCC compatible hypercalls are defined: + +* PV_SGI_FEATURES: 0xC6000090 +* PV_SGI_ENABLE: 0xC6000091 + +The existence of the PV_SGI hypercall should be probed using the SMCCC 1.1 +ARCH_FEATURES mechanism before calling it. + +PV_SGI_FEATURES + + ============= ======== ========== + Function ID: (uint32) 0xC6000090 + PV_call_id: (uint32) The function to query for support. + Currently only PV_SGI_ENABLE is supported. + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant + PV-sgi feature is supported by the hypervisor. + ============= ======== ========== + +PV_SGI_ENABLE + + ============= ======== ========== + Function ID: (uint32) 0xC6000091 + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if this feature + has been enabled. 
+ ============= ======== ========== diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3829167e97fc5..a8316cb666c36 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2256,6 +2256,18 @@ config ARM64_HDBSS endmenu # "ARMv9.5 architectural features" +config ARM64_HISI_IPIV + bool "Enable support for IPIV" + default y + depends on ACPI + depends on ARM64 + help + IPIV optimizes vSGI on the basis of GICv4.1. The vCPU on the sending + side of vSGI needs to trap to Hypervisor. IPIV sends vSGI without + trapping, improving performance. + + The feature will only be enabled if the CPU in the system and the Guest OS + support this feature. If unsure, say Y. config ARM64_SVE bool "ARM Scalable Vector Extension support" diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index df38eb2fc135e..c74fa800d1797 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1232,6 +1232,5 @@ bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu); extern bool force_wfi_trap; extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; -extern bool kvm_ipiv_support; #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f7ddd73a8c0fa..bcdf9d6dc37b0 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -376,6 +376,11 @@ enum { enum { KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, + /* + * If the mainline conflicts, do not change the + * current sequence, add in sequence.
+ */ + KVM_REG_ARM_VENDOR_HYP_BIT_IPIV = 2, #ifdef __KERNEL__ KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT, #endif diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3208a0bc6eeab..1b5389004fd63 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -64,9 +64,6 @@ bool kvm_ncsnp_support; /* Capability of DVMBM */ bool kvm_dvmbm_support; -/* Capability of IPIV */ -bool kvm_ipiv_support; - static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized); DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -195,6 +192,14 @@ static int kvm_cap_arm_enable_hdbss(struct kvm *kvm, } #endif +#ifdef CONFIG_ARM64_HISI_IPIV +static int kvm_hisi_ipiv_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + kvm->arch.vgic.its_vm.enable_ipiv_from_vmm = true; + return 0; +} +#endif + int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -246,6 +251,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = kvm_cap_arm_enable_hdbss(kvm, cap); break; +#endif +#ifdef CONFIG_ARM64_HISI_IPIV + case KVM_CAP_ARM_HISI_IPIV: + r = kvm_hisi_ipiv_enable_cap(kvm, cap); + break; #endif default: r = -EINVAL; @@ -352,7 +362,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_arm_teardown_hypercalls(kvm); } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { @@ -469,12 +481,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = system_supports_hdbss(); break; #endif - case KVM_CAP_ARM_IPIV_MODE: +#ifdef CONFIG_ARM64_HISI_IPIV + case KVM_CAP_ARM_HISI_IPIV: if (static_branch_unlikely(&ipiv_enable)) r = 1; else r = 0; break; +#endif default: r = 0; } @@ -1405,6 +1419,16 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features)) return -EINVAL; +#ifdef CONFIG_ARM64_HISI_IPIV + if (static_branch_unlikely(&ipiv_enable) && + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm 
&& + vcpu->vcpu_id != vcpu->vcpu_idx) { + kvm_err("IPIV ERROR: vcpu_id %d != vcpu_idx %d\n", + vcpu->vcpu_id, vcpu->vcpu_idx); + return -EINVAL; + } +#endif + return 0; } @@ -2659,13 +2683,8 @@ static __init int kvm_arm_init(void) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); - kvm_ipiv_support = hisi_ipiv_supported(); kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); - kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? "enabled" : "disabled"); - - if (kvm_ipiv_support) - ipiv_gicd_init(); if (kvm_dvmbm_support) kvm_get_pg_cfg(); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 3c06be7cd8a5f..d108b0ba31469 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -12,7 +12,10 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; static bool dvmbm_enabled; + +#ifdef CONFIG_ARM64_HISI_IPIV static bool ipiv_enabled; +#endif static const char * const hisi_cpu_type_str[] = { "Hisi1612", @@ -158,6 +161,7 @@ static void hardware_disable_dvmbm(void *data) write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); } +#ifdef CONFIG_ARM64_HISI_IPIV static int __init early_ipiv_enable(char *buf) { return strtobool(buf, &ipiv_enabled); @@ -175,12 +179,17 @@ bool hisi_ipiv_supported(void) return false; } + if (!gic_get_ipiv_status()) { + kvm_info("Hisi ipiv is disabled by BIOS\n"); + return false; + } + /* User provided kernel command-line parameter */ if (!ipiv_enabled || !is_kernel_in_hyp_mode()) return false; /* Enable IPIV feature if necessary */ - if (!is_gicv4p1()) { + if (!kvm_vgic_global_state.has_gicv4_1) { kvm_info("Hisi ipiv needs to enable GICv4p1!\n"); return false; } @@ -189,11 +198,36 @@ bool hisi_ipiv_supported(void) return true; } +extern struct static_key_false ipiv_enable; + +bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +{ + /* 
IPIV is supported by the hardware */ + if (!static_branch_unlikely(&ipiv_enable)) + return false; + + /* vSGI passthrough is configured */ + if (!vcpu->kvm->arch.vgic.nassgireq) + return false; + + /* IPIV is enabled by the user */ + if (!vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) + return false; + + return true; +} + +void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) +{ + /* Enable IPIV feature */ + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; +} + void ipiv_gicd_init(void) { gic_dist_enable_ipiv(); } - +#endif /* CONFIG_ARM64_HISI_IPIV */ bool hisi_dvmbm_supported(void) { diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 41906640c8a25..2609e360a2542 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -19,7 +19,9 @@ enum hisi_cpu_type { }; /* HIP12 */ +#ifdef CONFIG_ARM64_HISI_IPIV #define AIDR_EL1_IPIV_MASK GENMASK_ULL(17, 16) +#endif /* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) @@ -77,9 +79,13 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +#ifdef CONFIG_ARM64_HISI_IPIV bool hisi_ipiv_supported(void); -void kvm_get_pg_cfg(void); +bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu); +void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu); void ipiv_gicd_init(void); +#endif /* CONFIG_ARM64_HISI_IPIV */ +void kvm_get_pg_cfg(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -100,17 +106,26 @@ static inline bool hisi_dvmbm_supported(void) } static inline void kvm_get_pg_cfg(void) {} +#ifdef CONFIG_ARM64_HISI_IPIV static inline bool hisi_ipiv_supported(void) { return false; } +#endif /* CONFIG_ARM64_HISI_IPIV */ static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { return 0; } static inline void 
kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +#ifdef CONFIG_ARM64_HISI_IPIV +static inline bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +{ + return false; +} +static inline void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) {} static inline void ipiv_gicd_init(void) {} +#endif /* CONFIG_ARM64_HISI_IPIV */ static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) { return 0; @@ -121,6 +136,8 @@ static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ +#ifdef CONFIG_ARM64_HISI_IPIV extern bool gic_dist_enable_ipiv(void); -extern bool is_gicv4p1(void); +extern bool gic_get_ipiv_status(void); +#endif /* CONFIG_ARM64_HISI_IPIV */ #endif /* __HISI_VIRT_H__ */ diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 7fb4df0456dea..e63654972df4a 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -9,6 +9,10 @@ #include #include +#ifdef CONFIG_ARM64_HISI_IPIV +#include "hisilicon/hisi_virt.h" +#endif + #define KVM_ARM_SMCCC_STD_FEATURES \ GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0) #define KVM_ARM_SMCCC_STD_HYP_FEATURES \ @@ -116,6 +120,12 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID: return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP, &smccc_feat->vendor_hyp_bmap); +#ifdef CONFIG_ARM64_HISI_IPIV + case ARM_SMCCC_VENDOR_PV_SGI_FEATURES: + case ARM_SMCCC_VENDOR_PV_SGI_ENABLE: + return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_IPIV, + &smccc_feat->vendor_hyp_bmap); +#endif default: return false; } @@ -342,6 +352,22 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) if (gpa != INVALID_GPA) val[0] = gpa; break; +#ifdef CONFIG_ARM64_HISI_IPIV + case ARM_SMCCC_VENDOR_PV_SGI_FEATURES: + if (hisi_ipiv_supported_per_vm(vcpu)) + val[0] = SMCCC_RET_SUCCESS; + else + val[0] = SMCCC_RET_NOT_SUPPORTED; + break; + case ARM_SMCCC_VENDOR_PV_SGI_ENABLE: + if
(hisi_ipiv_supported_per_vm(vcpu)) { + hisi_ipiv_enable_per_vm(vcpu); + val[0] = SMCCC_RET_SUCCESS; + } else { + val[0] = SMCCC_RET_NOT_SUPPORTED; + } + break; +#endif case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID: val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0; val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 04db3508890b5..2f3f814558e2b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -694,8 +694,6 @@ static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) return actlr; } -extern struct static_key_false ipiv_enable; - static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; @@ -711,14 +709,6 @@ static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); - if (static_branch_unlikely(&ipiv_enable)) { - /* - * To avoid sending multi-SGIs in guest OS, make aff1/aff2 unique - */ - mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); - mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); - } - mpidr |= (1ULL << 31); vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1); @@ -1562,6 +1552,25 @@ static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, return val; } +#ifdef CONFIG_ARM64_HISI_IPIV +extern struct static_key_false ipiv_enable; +static int set_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, + u64 val) +{ + if (static_branch_unlikely(&ipiv_enable) && + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) { + if (val != __vcpu_sys_reg(vcpu, rd->reg)) { + kvm_err("IPIV ERROR: MPIDR changed\n"); + return -EINVAL; + } + } + + __vcpu_sys_reg(vcpu, rd->reg) = val; + + return 0; +} +#endif + static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 val) @@ -2068,7 +2077,12 @@ static const struct sys_reg_desc sys_reg_descs[] = { { 
SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 }, +#ifdef CONFIG_ARM64_HISI_IPIV + { SYS_DESC(SYS_MPIDR_EL1), + .reset = reset_mpidr, .reg = MPIDR_EL1, .set_user = set_mpidr}, +#else { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, +#endif /* * ID regs: all ID_SANITISED() entries here must have corresponding diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index b8e6316ba737d..63c7829c8338c 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -13,6 +13,11 @@ #include #include "vgic.h" +#ifdef CONFIG_ARM64_HISI_IPIV +#include +#include "hisilicon/hisi_virt.h" +#endif + /* * Initialization rules: there are multiple stages to the vgic * initialization, both for the distributor and the CPU interfaces. The basic @@ -520,7 +525,7 @@ int kvm_vgic_map_resources(struct kvm *kvm) return ret; } -#ifdef CONFIG_ACPI +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; static int ipiv_irq; #endif @@ -530,7 +535,7 @@ static int ipiv_irq; void kvm_vgic_cpu_up(void) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); -#ifdef CONFIG_ACPI +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) enable_percpu_irq(ipiv_irq, 0); #endif @@ -540,16 +545,34 @@ void kvm_vgic_cpu_up(void) void kvm_vgic_cpu_down(void) { disable_percpu_irq(kvm_vgic_global_state.maint_irq); -#ifdef CONFIG_ACPI +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) disable_percpu_irq(ipiv_irq); #endif } -#ifdef CONFIG_ACPI +#ifdef CONFIG_ARM64_HISI_IPIV +extern void __iomem *gic_data_rdist_get_vlpi_base(void); static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) { - kvm_info("IPIV irq handler!\n"); + void __iomem *vlpi_base = gic_data_rdist_get_vlpi_base(); + u32 gicr_ipiv_st; + bool broadcast_err, grpbrd_err, vcpuidx_err; + + gicr_ipiv_st = readl_relaxed(vlpi_base + GICR_IPIV_ST); + + broadcast_err = !!(gicr_ipiv_st & GICR_IPIV_ST_IRM_ERR); + if 
(broadcast_err) + kvm_err("IPIV error: IRM=1 Guest broadcast error\n"); + + grpbrd_err = !!(gicr_ipiv_st & GICR_IPIV_ST_BRPBRD_ERR); + if (grpbrd_err) + kvm_err("IPIV error: Guest group broadcast error\n"); + + vcpuidx_err = !!(gicr_ipiv_st & GICR_IPIV_ST_VCPUIDX_ERR); + if (vcpuidx_err) + kvm_err("IPIV error: The VCPU index is out of range\n"); + return IRQ_HANDLED; } #endif @@ -662,7 +685,14 @@ int kvm_vgic_hyp_init(void) kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); -#ifdef CONFIG_ACPI +#ifdef CONFIG_ARM64_HISI_IPIV + if (hisi_ipiv_supported()) { + ipiv_gicd_init(); + kvm_info("KVM ipiv enabled\n"); + } else { + kvm_info("KVM ipiv disabled\n"); + } + if (static_branch_unlikely(&ipiv_enable)) { ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH); diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 4f9084ba7949c..1c1abc412b10d 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -498,6 +498,14 @@ static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm, return extract_bytes(reg, addr & 7, len); } +#ifdef CONFIG_ARM64_HISI_IPIV +/* + * Use bit7 not used by GITS_IIDR to indicate whether IPIV is + * enabled for guest OS. 
+ */ +#define HISI_GUEST_ENABLE_IPIV_SHIFT 7 +#endif + static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len) @@ -518,6 +526,12 @@ static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm, if (rev >= NR_ITS_ABIS) return -EINVAL; + +#ifdef CONFIG_ARM64_HISI_IPIV + if (val & (1UL << HISI_GUEST_ENABLE_IPIV_SHIFT)) + kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; +#endif + return vgic_its_set_abi(its, rev); } @@ -2103,6 +2117,11 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, region->its_write(dev->kvm, its, addr, len, *reg); } else { *reg = region->its_read(dev->kvm, its, addr, len); +#ifdef CONFIG_ARM64_HISI_IPIV + if (dev->kvm->arch.vgic.its_vm.enable_ipiv_from_guest && + offset == GITS_IIDR) + *reg |= 1UL << HISI_GUEST_ENABLE_IPIV_SHIFT; +#endif } out: mutex_unlock(&dev->kvm->arch.config_lock); @@ -2739,6 +2758,15 @@ static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its) its->enabled = 0; vgic_its_free_device_list(kvm, its); vgic_its_free_collection_list(kvm, its); + +#ifdef CONFIG_ARM64_HISI_IPIV + /* + * For the para-virtualization feature IPIV, ensure that + * the flag of the guest OS is reset when the guest OS is + * reset. + */ + kvm->arch.vgic.its_vm.enable_ipiv_from_guest = false; +#endif } static int vgic_its_has_attr(struct kvm_device *dev, diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index c82caadc4edb5..7cba6653bfdcb 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -132,7 +132,6 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? 
*/ dist->nassgireq = val & GICD_CTLR_nASSGIreq; - dist->its_vm.nassgireq = dist->nassgireq; if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); diff --git a/config.aarch64 b/config.aarch64 index 6ad0c08367f29..e69af9a32ec15 100644 --- a/config.aarch64 +++ b/config.aarch64 @@ -525,6 +525,7 @@ CONFIG_ARM64_E0PD=y CONFIG_ARM64_EPAN=y # end of ARMv8.7 architectural features +CONFIG_ARM64_HISI_IPIV=y CONFIG_ARM64_SVE=y CONFIG_ARM64_SME=y CONFIG_ARM64_PSEUDO_NMI=y diff --git a/config.aarch64-64k b/config.aarch64-64k index 87d76e3aca7d7..5549099a20b9f 100644 --- a/config.aarch64-64k +++ b/config.aarch64-64k @@ -527,6 +527,7 @@ CONFIG_ARM64_E0PD=y CONFIG_ARM64_EPAN=y # end of ARMv8.7 architectural features +CONFIG_ARM64_HISI_IPIV=y CONFIG_ARM64_SVE=y CONFIG_ARM64_SME=y CONFIG_ARM64_PSEUDO_NMI=y diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6aab57a9979cc..38e3602a12a9d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -380,6 +380,14 @@ static int alloc_devid_from_rsv_pools(struct rsv_devid_pool **devid_pool, #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) +#ifdef CONFIG_ARM64_HISI_IPIV +void __iomem *gic_data_rdist_get_vlpi_base(void) +{ + return gic_data_rdist_vlpi_base(); +} +EXPORT_SYMBOL(gic_data_rdist_get_vlpi_base); +#endif + #ifdef CONFIG_VIRT_PLAT_DEV /* * Currently we only build *one* devid pool. 
@@ -4385,6 +4393,7 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) its_vpe_4_1_send_inv(d); } +#ifdef CONFIG_ARM64_HISI_IPIV /* IPIV private register */ #define CPU_SYS_TRAP_EL2 sys_reg(3, 4, 15, 7, 2) #define CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT 0 @@ -4397,40 +4406,38 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) */ static void ipiv_disable_vsgi_trap(void) { -#ifdef CONFIG_ARM64 u64 val; /* disable guest access ICC_SGI1R_EL1 trap, enable ipiv */ val = read_sysreg_s(CPU_SYS_TRAP_EL2); val |= CPU_SYS_TRAP_EL2_IPIV_ENABLE; write_sysreg_s(val, CPU_SYS_TRAP_EL2); -#endif } static void ipiv_enable_vsgi_trap(void) { -#ifdef CONFIG_ARM64 u64 val; /* enable guest access ICC_SGI1R_EL1 trap, disable ipiv */ val = read_sysreg_s(CPU_SYS_TRAP_EL2); val &= ~CPU_SYS_TRAP_EL2_IPIV_ENABLE; write_sysreg_s(val, CPU_SYS_TRAP_EL2); -#endif } +#endif /* CONFIG_ARM64_HISI_IPIV */ static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + +#ifdef CONFIG_ARM64_HISI_IPIV struct its_vm *vm = vpe->its_vm; unsigned long vpeid_page_addr; u64 ipiv_val = 0; - u64 val = 0; u32 nr_vpes; - if (static_branch_unlikely(&ipiv_enable) && - vm->nassgireq) { + if (vm->enable_ipiv_from_guest) { /* wait gicr_ipiv_busy */ WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, ipiv_val, !(ipiv_val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); @@ -4448,6 +4455,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe, ipiv_disable_vsgi_trap(); } +#endif /* CONFIG_ARM64_HISI_IPIV */ /* Schedule the VPE */ val |= GICR_VPENDBASER_Valid; @@ -4462,9 +4470,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - struct its_vm *vm = vpe->its_vm; u64 val; +#ifdef CONFIG_ARM64_HISI_IPIV + struct its_vm *vm = vpe->its_vm; +#endif + if (info->req_db) { unsigned long flags; @@ -4495,8 +4506,8 @@ static void 
its_vpe_4_1_deschedule(struct its_vpe *vpe, vpe->pending_last = true; } - if (static_branch_unlikely(&ipiv_enable) && - vm->nassgireq) { +#ifdef CONFIG_ARM64_HISI_IPIV + if (vm->enable_ipiv_from_guest) { /* wait gicr_ipiv_busy */ WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, val, !(val & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); @@ -4505,6 +4516,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, ipiv_enable_vsgi_trap(); } +#endif } static void its_vpe_4_1_invall(struct its_vpe *vpe) @@ -4897,10 +4909,12 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); - if (static_branch_unlikely(&ipiv_enable)) { +#ifdef CONFIG_ARM64_HISI_IPIV + if (vm->enable_ipiv_from_vmm) { free_pages((unsigned long)page_address(vm->vpeid_page), get_order(nr_irqs * 2)); } +#endif } } @@ -4910,10 +4924,14 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; - struct page *vprop_page, *vpeid_page; + struct page *vprop_page; int base, nr_ids, i, err = 0; + +#ifdef CONFIG_ARM64_HISI_IPIV + struct page *vpeid_page; void *vpeid_table_va; u16 *vpeid_entry; +#endif bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); if (!bitmap) @@ -4938,7 +4956,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; - if (static_branch_unlikely(&ipiv_enable)) { +#ifdef CONFIG_ARM64_HISI_IPIV + if (vm->enable_ipiv_from_vmm) { /* * The vpeid's size is 2 bytes, so we need to allocate 2 * * (num of vcpus). nr_irqs is equal to the number of vCPUs. 
@@ -4952,6 +4971,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->vpeid_page = vpeid_page; vpeid_table_va = page_address(vpeid_page); } +#endif } for (i = 0; i < nr_irqs; i++) { @@ -4959,10 +4979,12 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq err = its_vpe_init(vm->vpes[i]); if (err) break; - if (static_branch_unlikely(&ipiv_enable)) { +#ifdef CONFIG_ARM64_HISI_IPIV + if (vm->enable_ipiv_from_vmm) { vpeid_entry = (u16 *)vpeid_table_va + i; *vpeid_entry = vm->vpes[i]->vpe_id; } +#endif err = its_irq_gic_domain_alloc(domain, virq + i, vm->vpes[i]->vpe_db_lpi); if (err) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index a3db9e541579c..bfcc0f460b1ac 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -105,9 +105,15 @@ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); EXPORT_SYMBOL(gic_nonsecure_priorities); +#ifdef CONFIG_ARM64_HISI_IPIV +/* indicate if host supports IPIv */ DEFINE_STATIC_KEY_FALSE(ipiv_enable); EXPORT_SYMBOL(ipiv_enable); +/* indicate if guest is using IPIv */ +static bool hisi_pv_sgi_enabled; +#endif + /* * When the Non-secure world has access to group 0 interrupts (as a * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will @@ -1364,27 +1370,15 @@ static int gic_dist_supports_lpis(void) !gicv3_nolpi); } -bool is_gicv4p1(void) -{ - if (!gic_data.rdists.has_rvpeid) - return false; - - return true; -} -EXPORT_SYMBOL(is_gicv4p1); - +#ifdef CONFIG_ARM64_HISI_IPIV void gic_dist_enable_ipiv(void) { u32 val; - val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); - val |= GICD_MISC_CTRL_CFG_IPIV_EN; - writel_relaxed(val, gic_data.dist_base + GICD_MISC_CTRL); static_branch_enable(&ipiv_enable); - val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | - (0 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | - (4 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | + (4 
<< GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | + (12 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | (7 << GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT) | (2 << GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT); writel_relaxed(val, gic_data.dist_base + GICD_IPIV_CTRL); @@ -1394,6 +1388,19 @@ void gic_dist_enable_ipiv(void) } EXPORT_SYMBOL(gic_dist_enable_ipiv); +bool gic_get_ipiv_status(void) +{ + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); + if (val & GICD_MISC_CTRL_CFG_IPIV_EN) + return true; + + return false; +} +EXPORT_SYMBOL(gic_get_ipiv_status); +#endif /* CONFIG_ARM64_HISI_IPIV */ + static void gic_cpu_init(void) { void __iomem *rbase; @@ -1502,7 +1509,15 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); u16 tlist; +#ifdef CONFIG_ARM64_HISI_IPIV + if (!hisi_pv_sgi_enabled) + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + else + tlist = 1 << (gic_cpu_to_affinity(cpu) & 0xf); +#else tlist = gic_compute_target_list(&cpu, mask, cluster_id); +#endif + gic_send_sgi(cluster_id, tlist, d->hwirq); } @@ -2738,6 +2753,28 @@ static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) return gsi_domain_handle; } +#ifdef CONFIG_ARM64_HISI_IPIV +static void hisi_pv_sgi_init(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_FEATURES, &res); + if (res.a0 != SMCCC_RET_SUCCESS) { + pr_info("Not Support HiSilicon PV SGI!\n"); + return; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_ENABLE, &res); + if (res.a0 != SMCCC_RET_SUCCESS) { + pr_info("Disable HiSilicon PV SGI!\n"); + return; + } + + hisi_pv_sgi_enabled = true; + pr_info("Enable HiSilicon PV SGI!\n"); +} +#endif + static int __init gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { @@ -2790,6 +2827,10 @@ gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) if (static_branch_likely(&supports_deactivate_key)) 
gic_acpi_setup_kvm_info(); +#ifdef CONFIG_ARM64_HISI_IPIV + hisi_pv_sgi_init(); +#endif + return 0; out_fwhandle_free: diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 374ff338755ca..73736e0dbc405 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -551,5 +551,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, method; \ }) +#ifdef CONFIG_ARM64_HISI_IPIV +/* HiSilicon paravirtualised sgi calls */ +#define ARM_SMCCC_VENDOR_PV_SGI_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + 0x90) + +#define ARM_SMCCC_VENDOR_PV_SGI_ENABLE \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + 0x91) +#endif /* CONFIG_ARM64_HISI_IPIV */ + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index b817678aebb3d..ae139ff41f0ab 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -111,6 +111,7 @@ #define GIC_PAGE_SIZE_64K 2ULL #define GIC_PAGE_SIZE_MASK 3ULL +#ifdef CONFIG_ARM64_HISI_IPIV #define GICD_MISC_CTRL 0x2084 #define GICD_MISC_CTRL_CFG_IPIV_EN (1U << 19) @@ -122,6 +123,7 @@ #define GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT 16 #define GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT 19 #define GICD_IPIV_ITS_TA_BASE 0xc010 +#endif /* * Re-Distributor registers, offsets from RD_base @@ -372,6 +374,7 @@ #define GICR_VSGIPENDR_BUSY (1U << 31) #define GICR_VSGIPENDR_PENDING GENMASK(15, 0) +#ifdef CONFIG_ARM64_HISI_IPIV /* IPIV VM table address */ #define GICR_VM_TABLE_BAR_L 0x140 #define GICR_VM_TABLE_BAR_H 0x144 @@ -386,6 +389,13 @@ #define GICR_IPIV_ST 0x14c #define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 #define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) +#define GICR_IPIV_ST_IRM_ERR_ST_SHIFT 1 +#define GICR_IPIV_ST_IRM_ERR (1 << GICR_IPIV_ST_IRM_ERR_ST_SHIFT) +#define 
GICR_IPIV_ST_BRPBRD_ERR_ST_SHIFT 2 +#define GICR_IPIV_ST_BRPBRD_ERR (1 << GICR_IPIV_ST_BRPBRD_ERR_ST_SHIFT) +#define GICR_IPIV_ST_VCPUIDX_ERR_ST_SHIFT 3 +#define GICR_IPIV_ST_VCPUIDX_ERR (1 << GICR_IPIV_ST_VCPUIDX_ERR_ST_SHIFT) +#endif /* CONFIG_ARM64_HISI_IPIV */ /* * ITS registers, offsets from ITS_base diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 474193e6d319b..335b4aab69063 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -34,8 +34,11 @@ struct its_vm { */ raw_spinlock_t vmapp_lock; u32 vlpi_count[GICv4_ITS_LIST_MAX]; +#ifdef CONFIG_ARM64_HISI_IPIV struct page *vpeid_page; - bool nassgireq; + bool enable_ipiv_from_vmm; + bool enable_ipiv_from_guest; +#endif }; /* Embedded in kvm_vcpu.arch */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6c1c4697c66ea..7dd5528019dcd 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1197,7 +1197,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_HW_DIRTY_STATE_TRACK 502 -#define KVM_CAP_ARM_IPIV_MODE 503 +#define KVM_CAP_ARM_HISI_IPIV 798 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index f7ddd73a8c0fa..bcdf9d6dc37b0 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -376,6 +376,11 @@ enum { enum { KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0, KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1, + /* + * If the mainline conflicts, do not change the + * current sequence, add in sequence. 
+ */ + KVM_REG_ARM_VENDOR_HYP_BIT_IPIV = 2, #ifdef __KERNEL__ KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT, #endif diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c index 31f66ba97228b..f433c809b8033 100644 --- a/tools/testing/selftests/kvm/aarch64/hypercalls.c +++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c @@ -20,7 +20,7 @@ /* Last valid bits of the bitmapped firmware registers */ #define KVM_REG_ARM_STD_BMAP_BIT_MAX 0 #define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0 -#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1 +#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 2 struct kvm_fw_reg_info { uint64_t reg; /* Register definition */