diff --git a/cranelift/codegen/src/isa/riscv64/inst.isle b/cranelift/codegen/src/isa/riscv64/inst.isle index aece273057ca..649d482ea32b 100644 --- a/cranelift/codegen/src/isa/riscv64/inst.isle +++ b/cranelift/codegen/src/isa/riscv64/inst.isle @@ -1210,6 +1210,12 @@ (rule (rv_andi rs1 imm) (alu_rr_imm12 (AluOPRRI.Andi) rs1 imm)) +;; Helper for emitting the `slt` ("Set Less Than") instruction. +;; rd ← rs1 < rs2 +(decl rv_slt (XReg XReg) XReg) +(rule (rv_slt rs1 rs2) + (alu_rrr (AluOPRRR.Slt) rs1 rs2)) + ;; Helper for emitting the `sltu` ("Set Less Than Unsigned") instruction. ;; rd ← rs1 < rs2 (decl rv_sltu (XReg XReg) XReg) @@ -1222,12 +1228,28 @@ (rule (rv_snez rs1) (rv_sltu (zero_reg) rs1)) +;; Helper for emitting the `slti` ("Set Less Than Immediate") instruction. +;; rd ← rs1 < imm +(decl rv_slti (XReg Imm12) XReg) +(rule (rv_slti rs1 imm) + (alu_rr_imm12 (AluOPRRI.Slti) rs1 imm)) + ;; Helper for emiting the `sltiu` ("Set Less Than Immediate Unsigned") instruction. ;; rd ← rs1 < imm (decl rv_sltiu (XReg Imm12) XReg) (rule (rv_sltiu rs1 imm) (alu_rr_imm12 (AluOPRRI.SltiU) rs1 imm)) +;; Helper for emitting the `sltz` instruction. +;; This instruction is a mnemonic for `slt rd, rs, zero`. +(decl rv_sltz (XReg) XReg) +(rule (rv_sltz rs) (rv_slt rs (zero_reg))) + +;; Helper for emitting the `sgtz` instruction. +;; This instruction is a mnemonic for `slt rd, zero, rs`. +(decl rv_sgtz (XReg) XReg) +(rule (rv_sgtz rs) (rv_slt (zero_reg) rs)) + ;; Helper for emitting the `seqz` instruction. ;; This instruction is a mnemonic for `sltiu rd, rs, 1`. (decl rv_seqz (XReg) XReg) @@ -1718,9 +1740,6 @@ (decl imm_from_bits (u64) Imm12) (extern constructor imm_from_bits imm_from_bits) -(decl imm_from_neg_bits (i64) Imm12) -(extern constructor imm_from_neg_bits imm_from_neg_bits) - (decl imm12_const_add (i32 i32) Imm12) (extern constructor imm12_const_add imm12_const_add) @@ -3035,7 +3054,7 @@ (extern constructor sp_reg sp_reg) ;; Helper for creating the zero register. 
-(decl zero_reg () Reg) +(decl zero_reg () XReg) (extern constructor zero_reg zero_reg) (decl value_regs_zero () ValueRegs) diff --git a/cranelift/codegen/src/isa/riscv64/lower.isle b/cranelift/codegen/src/isa/riscv64/lower.isle index cb8f8ff0e49f..aaf12f19d58b 100644 --- a/cranelift/codegen/src/isa/riscv64/lower.isle +++ b/cranelift/codegen/src/isa/riscv64/lower.isle @@ -1603,10 +1603,93 @@ (rule 0 (lower (icmp cc x @ (value_type (ty_int ty)) y)) (lower_icmp cc x y ty)) -(rule 1 (lower (icmp cc x @ (value_type (ty_vec_fits_in_register ty)) y)) +(rule 1 (lower (icmp cc x @ (value_type (ty_int_ref_scalar_64 _)) y)) + (scalar_icmp cc x y)) + +;; try to put constants on the right hand side of `scalar_icmp` to avoid +;; duplication of those rules +(rule 2 (lower (icmp cc x @ (iconst _) y @ (value_type (ty_int_ref_scalar_64 _)))) + (scalar_icmp (intcc_swap_args cc) y x)) + +(rule 3 (lower (icmp cc x @ (value_type (ty_vec_fits_in_register ty)) y)) (gen_expand_mask ty (gen_icmp_mask ty cc x y))) +;; Helper to compare just two scalars. Callers should try to put constants on +;; the right-hand side. 
+(decl scalar_icmp (IntCC Value Value) XReg) +(rule 0 (scalar_icmp (IntCC.Equal) x y) + (rv_seqz (rv_xor x y))) + +;; x == y <=> x - y == 0 +(rule 1 (scalar_icmp (IntCC.Equal) x (i64_from_iconst y)) + (if-let (imm12_from_i64 imm) (i64_neg y)) + (rv_seqz (rv_addi x imm))) + +(rule 2 (scalar_icmp (IntCC.Equal) x (u64_from_iconst 0)) + (rv_seqz x)) + +(rule 0 (scalar_icmp (IntCC.NotEqual) x y) + (rv_snez (rv_xor x y))) + +;; x != y <=> x - y != 0 +(rule 1 (scalar_icmp (IntCC.NotEqual) x (i64_from_iconst y)) + (if-let (imm12_from_i64 imm) (i64_neg y)) + (rv_snez (rv_addi x imm))) + +(rule 2 (scalar_icmp (IntCC.NotEqual) x (u64_from_iconst 0)) + (rv_snez x)) + +;; x != -1 <=> (unsigned) x < UINT_MAX +(rule 3 (scalar_icmp (IntCC.NotEqual) x (i64_from_iconst -1)) + (rv_sltiu x (imm12_const -1))) + +(rule 0 (scalar_icmp (IntCC.SignedLessThan) x y) + (rv_slt x y)) + +(rule 1 (scalar_icmp (IntCC.SignedLessThan) x (imm12_from_value y)) + (rv_slti x y)) + +(rule 2 (scalar_icmp (IntCC.SignedLessThan) x (u64_from_iconst 0)) + (rv_sltz x)) + +(rule 0 (scalar_icmp (IntCC.UnsignedLessThan) x y) + (rv_sltu x y)) + +(rule 1 (scalar_icmp (IntCC.UnsignedLessThan) x (imm12_from_value y)) + (rv_sltiu x y)) + +;; x < 1 <=> x == 0 +(rule 2 (scalar_icmp (IntCC.UnsignedLessThan) x (u64_from_iconst 1)) + (rv_seqz x)) + +;; x > y <=> y < x +(rule 0 (scalar_icmp (IntCC.SignedGreaterThan) x y) + (rv_slt y x)) + +(rule 1 (scalar_icmp (IntCC.SignedGreaterThan) x (u64_from_iconst 0)) + (rv_sgtz x)) + +;; x > y <=> y < x +(rule 0 (scalar_icmp (IntCC.UnsignedGreaterThan) x y) + (rv_sltu y x)) + +;; x > 0 <=> x != 0 +(rule 1 (scalar_icmp (IntCC.UnsignedGreaterThan) x (u64_from_iconst 0)) + (rv_snez x)) + +;; for remaining variants which have a "OrEqual" condition code invert the +;; condition and invert the result. 
+(rule 0 (scalar_icmp cc @ (IntCC.SignedGreaterThanOrEqual) x y) + (rv_xori (scalar_icmp (intcc_complement cc) x y) (imm12_const 1))) +(rule 0 (scalar_icmp cc @ (IntCC.UnsignedGreaterThanOrEqual) x y) + (rv_xori (scalar_icmp (intcc_complement cc) x y) (imm12_const 1))) +(rule 0 (scalar_icmp cc @ (IntCC.SignedLessThanOrEqual) x y) + (rv_xori (scalar_icmp (intcc_complement cc) x y) (imm12_const 1))) +(rule 0 (scalar_icmp cc @ (IntCC.UnsignedLessThanOrEqual) x y) + (rv_xori (scalar_icmp (intcc_complement cc) x y) (imm12_const 1))) + + ;;;;; Rules for `fcmp`;;;;;;;;; (rule 0 (lower (fcmp cc x @ (value_type (ty_scalar_float ty)) y)) (cmp_value (emit_fcmp cc ty x y))) diff --git a/cranelift/codegen/src/isa/riscv64/lower/isle.rs b/cranelift/codegen/src/isa/riscv64/lower/isle.rs index 88cf010f34ec..fbd58fae7610 100644 --- a/cranelift/codegen/src/isa/riscv64/lower/isle.rs +++ b/cranelift/codegen/src/isa/riscv64/lower/isle.rs @@ -244,9 +244,9 @@ impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> fn int_zero_reg(&mut self, ty: Type) -> ValueRegs { assert!(ty.is_int(), "{:?}", ty); if ty.bits() == 128 { - ValueRegs::two(self.zero_reg(), self.zero_reg()) + ValueRegs::two(self.zero_reg().to_reg(), self.zero_reg().to_reg()) } else { - ValueRegs::one(self.zero_reg()) + ValueRegs::one(self.zero_reg().to_reg()) } } @@ -343,17 +343,13 @@ impl generated_code::Context for RV64IsleContext<'_, '_, MInst, Riscv64Backend> writable_zero_reg() } #[inline] - fn zero_reg(&mut self) -> Reg { - zero_reg() + fn zero_reg(&mut self) -> XReg { + XReg::new(zero_reg()).unwrap() } #[inline] fn imm_from_bits(&mut self, val: u64) -> Imm12 { Imm12::maybe_from_u64(val).unwrap() } - #[inline] - fn imm_from_neg_bits(&mut self, val: i64) -> Imm12 { - Imm12::maybe_from_i64(val).unwrap() - } fn gen_default_frm(&mut self) -> OptionFloatRoundingMode { None diff --git a/cranelift/filetests/filetests/isa/riscv64/condbr.clif b/cranelift/filetests/filetests/isa/riscv64/condbr.clif index 
e9d9d9d9addb..6aabe0bcc9c8 100644 --- a/cranelift/filetests/filetests/isa/riscv64/condbr.clif +++ b/cranelift/filetests/filetests/isa/riscv64/condbr.clif @@ -10,15 +10,14 @@ block0(v0: i64, v1: i64): ; VCode: ; block0: -; eq a0,a0,a1##ty=i64 +; xor a3,a0,a1 +; seqz a0,a3 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; bne a0, a1, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero +; xor a3, a0, a1 +; seqz a0, a3 ; ret function %icmp_eq_i128(i128, i128) -> i8 { diff --git a/cranelift/filetests/filetests/isa/riscv64/condops.clif b/cranelift/filetests/filetests/isa/riscv64/condops.clif index 578e27556a6a..36d06f806673 100644 --- a/cranelift/filetests/filetests/isa/riscv64/condops.clif +++ b/cranelift/filetests/filetests/isa/riscv64/condops.clif @@ -38,21 +38,14 @@ block0(v0: i8): ; VCode: ; block0: -; li a5,42 -; andi a3,a0,255 -; andi a5,a5,255 -; eq a0,a3,a5##ty=i8 +; addi a2,a0,-42 +; seqz a0,a2 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a5, zero, 0x2a -; andi a3, a0, 0xff -; andi a5, a5, 0xff -; bne a3, a5, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero +; addi a2, a0, -0x2a +; seqz a0, a2 ; ret function %h(i8, i8, i8) -> i8 { diff --git a/cranelift/filetests/filetests/isa/riscv64/icmp.clif b/cranelift/filetests/filetests/isa/riscv64/icmp.clif new file mode 100644 index 000000000000..f2d123fb5f64 --- /dev/null +++ b/cranelift/filetests/filetests/isa/riscv64/icmp.clif @@ -0,0 +1,952 @@ +test compile precise-output +set unwind_info=false +target riscv64 + +function %eq(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp eq v0, v1 + return v2 +} + +; VCode: +; block0: +; xor a3,a0,a1 +; seqz a0,a3 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; xor a3, a0, a1 +; seqz a0, a3 +; ret + +function %eqz1(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 0 + v2 = icmp eq v0, v1 + return v2 +} + +; VCode: +; block0: +; seqz a0,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; seqz a0, a0 +; ret + +function %eqz2(i32) -> i8 system_v 
{ +block0(v0: i32): + v1 = iconst.i32 0 + v2 = icmp eq v1, v0 + return v2 +} + +; VCode: +; block0: +; seqz a0,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; seqz a0, a0 +; ret + +function %eq_const1(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 1 + v2 = icmp eq v1, v0 + return v2 +} + +; VCode: +; block0: +; addi a2,a0,-1 +; seqz a0,a2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a2, a0, -1 +; seqz a0, a2 +; ret + +function %eq_const2(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 2 + v2 = icmp eq v0, v1 + return v2 +} + +; VCode: +; block0: +; addi a2,a0,-2 +; seqz a0,a2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a2, a0, -2 +; seqz a0, a2 +; ret + +function %eq_const3(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 0xfff + v2 = icmp eq v1, v0 + return v2 +} + +; VCode: +; block0: +; lui a4,1 +; addi a1,a4,-1 +; xor a4,a0,a1 +; seqz a0,a4 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; lui a4, 1 +; addi a1, a4, -1 +; xor a4, a0, a1 +; seqz a0, a4 +; ret + +function %eq_const4(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 -1 + v2 = icmp eq v0, v1 + return v2 +} + +; VCode: +; block0: +; addi a2,a0,1 +; seqz a0,a2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a2, a0, 1 +; seqz a0, a2 +; ret + + +function %ne(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp ne v0, v1 + return v2 +} + +; VCode: +; block0: +; xor a3,a0,a1 +; sltu a0,zero,a3 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; xor a3, a0, a1 +; snez a0, a3 +; ret + +function %nez1(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 0 + v2 = icmp ne v0, v1 + return v2 +} + +; VCode: +; block0: +; sltu a0,zero,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; snez a0, a0 +; ret + +function %nez2(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 0 + v2 = icmp ne v1, v0 + return v2 +} + +; VCode: +; block0: +; sltu a0,zero,a0 +; ret +; +; Disassembled: +; block0: ; 
offset 0x0 +; snez a0, a0 +; ret + +function %ne_const1(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 1 + v2 = icmp ne v1, v0 + return v2 +} + +; VCode: +; block0: +; addi a2,a0,-1 +; sltu a0,zero,a2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a2, a0, -1 +; snez a0, a2 +; ret + +function %ne_const2(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 2 + v2 = icmp ne v0, v1 + return v2 +} + +; VCode: +; block0: +; addi a2,a0,-2 +; sltu a0,zero,a2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a2, a0, -2 +; snez a0, a2 +; ret + +function %ne_const3(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 0xfff + v2 = icmp ne v1, v0 + return v2 +} + +; VCode: +; block0: +; lui a4,1 +; addi a1,a4,-1 +; xor a4,a0,a1 +; sltu a0,zero,a4 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; lui a4, 1 +; addi a1, a4, -1 +; xor a4, a0, a1 +; snez a0, a4 +; ret + +function %ne_const4(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 -1 + v2 = icmp ne v0, v1 + return v2 +} + +; VCode: +; block0: +; sltiu a0,a0,-1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltiu a0, a0, -1 +; ret + +function %ne_const5(i32) -> i8 system_v { +block0(v0: i32): + v1 = iconst.i32 -2 + v2 = icmp ne v0, v1 + return v2 +} + +; VCode: +; block0: +; addi a2,a0,2 +; sltu a0,zero,a2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a2, a0, 2 +; snez a0, a2 +; ret + +function %slt(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp slt v0, v1 + return v2 +} + +; VCode: +; block0: +; slt a0,a0,a1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slt a0, a0, a1 +; ret + +function %slt_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm slt v0, 0 + return v2 +} + +; VCode: +; block0: +; slt a0,a0,zero +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltz a0, a0 +; ret + +function %slt_const2(i64) -> i8 system_v { +block0(v0: i64): + v2 = icmp_imm slt v0, 0 + return v2 +} + +; VCode: +; block0: +; slt 
a0,a0,zero +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltz a0, a0 +; ret + +function %slt_const3(i16) -> i8 system_v { +block0(v0: i16): + v2 = icmp_imm slt v0, 0 + return v2 +} + +; VCode: +; block0: +; slt a0,a0,zero +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltz a0, a0 +; ret + +function %slt_const4(i8) -> i8 system_v { +block0(v0: i8): + v2 = icmp_imm slt v0, 0 + return v2 +} + +; VCode: +; block0: +; slt a0,a0,zero +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltz a0, a0 +; ret + +function %slt_const5(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm slt v0, 1 + return v2 +} + +; VCode: +; block0: +; slti a0,a0,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slti a0, a0, 1 +; ret + +function %slt_const6(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm slt v0, 2 + return v2 +} + +; VCode: +; block0: +; slti a0,a0,2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slti a0, a0, 2 +; ret + +function %slt_const7(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm slt v0, -7 + return v2 +} + +; VCode: +; block0: +; slti a0,a0,-7 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slti a0, a0, -7 +; ret + +function %slt_const8(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm slt v0, 0xfff + return v2 +} + +; VCode: +; block0: +; lui a3,1 +; addi a5,a3,-1 +; slt a0,a0,a5 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; lui a3, 1 +; addi a5, a3, -1 +; slt a0, a0, a5 +; ret + +function %sgt(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp sgt v0, v1 + return v2 +} + +; VCode: +; block0: +; slt a0,a1,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slt a0, a1, a0 +; ret + +function %sgt_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm sgt v0, 0 + return v2 +} + +; VCode: +; block0: +; slt a0,zero,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sgtz a0, a0 +; ret + +function %sgt_const2(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm sgt v0, 
1 + return v2 +} + +; VCode: +; block0: +; li a3,1 +; slt a0,a3,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a3, zero, 1 +; slt a0, a3, a0 +; ret + +function %sgt_const2(i16) -> i8 system_v { +block0(v0: i16): + v2 = icmp_imm sgt v0, 0 + return v2 +} + +; VCode: +; block0: +; slt a0,zero,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sgtz a0, a0 +; ret + +function %sle(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp sle v0, v1 + return v2 +} + +; VCode: +; block0: +; slt a3,a1,a0 +; xori a0,a3,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slt a3, a1, a0 +; xori a0, a3, 1 +; ret + +function %sge(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp sge v0, v1 + return v2 +} + +; VCode: +; block0: +; slt a3,a0,a1 +; xori a0,a3,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slt a3, a0, a1 +; xori a0, a3, 1 +; ret + +function %sge_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm sge v0, 0 + return v2 +} + +; VCode: +; block0: +; slt a2,a0,zero +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltz a2, a0 +; xori a0, a2, 1 +; ret + +function %sge_const2(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm sge v0, 1 + return v2 +} + +; VCode: +; block0: +; slti a2,a0,1 +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slti a2, a0, 1 +; xori a0, a2, 1 +; ret + +function %sge_const3(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm sge v0, -1 + return v2 +} + +; VCode: +; block0: +; slti a2,a0,-1 +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; slti a2, a0, -1 +; xori a0, a2, 1 +; ret + +function %ult(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp ult v0, v1 + return v2 +} + +; VCode: +; block0: +; sltu a0,a0,a1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltu a0, a0, a1 +; ret + +function %ult_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ult v0, 1 + return v2 +} + +; 
VCode: +; block0: +; seqz a0,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; seqz a0, a0 +; ret + +function %ult_const2(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ult v0, 2 + return v2 +} + +; VCode: +; block0: +; sltiu a0,a0,2 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltiu a0, a0, 2 +; ret + +function %ult_const3(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ult v0, -5 + return v2 +} + +; VCode: +; block0: +; sltiu a0,a0,-5 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltiu a0, a0, -5 +; ret + +function %ult_const4(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ult v0, 0xfff + return v2 +} + +; VCode: +; block0: +; lui a3,1 +; addi a5,a3,-1 +; sltu a0,a0,a5 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; lui a3, 1 +; addi a5, a3, -1 +; sltu a0, a0, a5 +; ret + +function %ugt(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp ugt v0, v1 + return v2 +} + +; VCode: +; block0: +; sltu a0,a1,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltu a0, a1, a0 +; ret + +function %ugt_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ugt v0, 0 + return v2 +} + +; VCode: +; block0: +; sltu a0,zero,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; snez a0, a0 +; ret + +function %ugt_const2(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ugt v0, 1 + return v2 +} + +; VCode: +; block0: +; li a3,1 +; sltu a0,a3,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a3, zero, 1 +; sltu a0, a3, a0 +; ret + +function %ugt_const3(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ugt v0, 2 + return v2 +} + +; VCode: +; block0: +; li a3,2 +; sltu a0,a3,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a3, zero, 2 +; sltu a0, a3, a0 +; ret + +function %ugt_const3(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ugt v0, -1 + return v2 +} + +; VCode: +; block0: +; li a3,-1 +; sltu a0,a3,a0 +; ret +; +; Disassembled: +; block0: ; offset 0x0 
+; addi a3, zero, -1 +; sltu a0, a3, a0 +; ret + +function %ule(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp ule v0, v1 + return v2 +} + +; VCode: +; block0: +; sltu a3,a1,a0 +; xori a0,a3,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltu a3, a1, a0 +; xori a0, a3, 1 +; ret + +function %ule_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ule v0, 0 + return v2 +} + +; VCode: +; block0: +; sltu a2,zero,a0 +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; snez a2, a0 +; xori a0, a2, 1 +; ret + +function %ule_const2(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ule v0, 1 + return v2 +} + +; VCode: +; block0: +; li a4,1 +; sltu a3,a4,a0 +; xori a0,a3,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a4, zero, 1 +; sltu a3, a4, a0 +; xori a0, a3, 1 +; ret + +function %ule_const3(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ule v0, -1 + return v2 +} + +; VCode: +; block0: +; li a4,-1 +; sltu a3,a4,a0 +; xori a0,a3,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; addi a4, zero, -1 +; sltu a3, a4, a0 +; xori a0, a3, 1 +; ret + +function %ule_const4(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm ule v0, 0xfff + return v2 +} + +; VCode: +; block0: +; lui a4,1 +; addi a1,a4,-1 +; sltu a4,a1,a0 +; xori a0,a4,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; lui a4, 1 +; addi a1, a4, -1 +; sltu a4, a1, a0 +; xori a0, a4, 1 +; ret + +function %uge(i32, i32) -> i8 system_v { +block0(v0: i32, v1: i32): + v2 = icmp uge v0, v1 + return v2 +} + +; VCode: +; block0: +; sltu a3,a0,a1 +; xori a0,a3,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltu a3, a0, a1 +; xori a0, a3, 1 +; ret + +function %uge_const1(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm uge v0, 0 + return v2 +} + +; VCode: +; block0: +; sltiu a2,a0,0 +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltiu a2, a0, 0 +; xori a0, a2, 1 +; ret + +function 
%uge_const2(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm uge v0, 1 + return v2 +} + +; VCode: +; block0: +; seqz a2,a0 +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; seqz a2, a0 +; xori a0, a2, 1 +; ret + +function %uge_const3(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm uge v0, -1 + return v2 +} + +; VCode: +; block0: +; sltiu a2,a0,-1 +; xori a0,a2,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; sltiu a2, a0, -1 +; xori a0, a2, 1 +; ret + +function %uge_const4(i32) -> i8 system_v { +block0(v0: i32): + v2 = icmp_imm uge v0, 0xfff + return v2 +} + +; VCode: +; block0: +; lui a4,1 +; addi a1,a4,-1 +; sltu a4,a0,a1 +; xori a0,a4,1 +; ret +; +; Disassembled: +; block0: ; offset 0x0 +; lui a4, 1 +; addi a1, a4, -1 +; sltu a4, a0, a1 +; xori a0, a4, 1 +; ret + diff --git a/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif b/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif index bbb8be09d181..fb3d285aa325 100644 --- a/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif +++ b/cranelift/filetests/filetests/isa/riscv64/iconst-icmp-small.clif @@ -12,26 +12,17 @@ block0: ; VCode: ; block0: -; lui a5,-2 -; addi a1,a5,-564 -; slli a2,a1,48 -; srli a4,a2,48 -; slli a0,a1,48 -; srli a2,a0,48 -; ne a0,a4,a2##ty=i16 +; lui a2,-2 +; addi a4,a2,-564 +; xor a2,a4,a4 +; sltu a0,zero,a2 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; lui a5, 0xffffe -; addi a1, a5, -0x234 -; slli a2, a1, 0x30 -; srli a4, a2, 0x30 -; slli a0, a1, 0x30 -; srli a2, a0, 0x30 -; beq a4, a2, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero +; lui a2, 0xffffe +; addi a4, a2, -0x234 +; xor a2, a4, a4 +; snez a0, a2 ; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/return-call.clif b/cranelift/filetests/filetests/isa/riscv64/return-call.clif index 123f6dc0a395..ad96b68456c2 100644 --- a/cranelift/filetests/filetests/isa/riscv64/return-call.clif +++ 
b/cranelift/filetests/filetests/isa/riscv64/return-call.clif @@ -157,21 +157,12 @@ block0(v0: i8): ; VCode: ; block0: -; li a5,0 -; andi a3,s1,255 -; andi a5,a5,255 -; eq s1,a3,a5##ty=i8 +; seqz s1,s1 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; mv a5, zero -; andi a3, s1, 0xff -; andi a5, a5, 0xff -; bne a3, a5, 0xc -; addi s1, zero, 1 -; j 8 -; mv s1, zero +; seqz s1, s1 ; ret function %call_i8(i8) -> i8 tail { diff --git a/cranelift/filetests/filetests/isa/riscv64/select.clif b/cranelift/filetests/filetests/isa/riscv64/select.clif index ef3011860af4..b7d90541fd3b 100644 --- a/cranelift/filetests/filetests/isa/riscv64/select.clif +++ b/cranelift/filetests/filetests/isa/riscv64/select.clif @@ -119,35 +119,49 @@ block0(v0: i8, v1: i128, v2: i128): } ; VCode: +; add sp,-16 +; sd ra,8(sp) +; sd fp,0(sp) +; mv fp,sp +; sd s8,-8(sp) +; add sp,-16 ; block0: -; mv a5,a0 -; mv t2,a1 -; li a0,42 -; andi a5,a5,255 -; andi a0,a0,255 -; eq a5,a5,a0##ty=i8 +; mv s8,a1 +; addi a5,a0,-42 +; seqz a5,a5 ; andi a5,a5,255 -; select_i128 [a0,a1],[t2,a2],[a3,a4]##condition=a5 +; select_i128 [a0,a1],[s8,a2],[a3,a4]##condition=a5 +; add sp,+16 +; ld s8,-8(sp) +; ld ra,8(sp) +; ld fp,0(sp) +; add sp,+16 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; mv a5, a0 -; mv t2, a1 -; addi a0, zero, 0x2a -; andi a5, a5, 0xff -; andi a0, a0, 0xff -; bne a5, a0, 0xc -; addi a5, zero, 1 -; j 8 -; mv a5, zero +; addi sp, sp, -0x10 +; sd ra, 8(sp) +; sd s0, 0(sp) +; mv s0, sp +; sd s8, -8(sp) +; addi sp, sp, -0x10 +; block1: ; offset 0x18 +; mv s8, a1 +; addi a5, a0, -0x2a +; seqz a5, a5 ; andi a5, a5, 0xff ; beqz a5, 0x10 -; mv a0, t2 +; mv a0, s8 ; mv a1, a2 ; j 0xc ; mv a0, a3 ; mv a1, a4 +; addi sp, sp, 0x10 +; ld s8, -8(sp) +; ld ra, 8(sp) +; ld s0, 0(sp) +; addi sp, sp, 0x10 ; ret function %select_icmp_i16_i8(i16, i8, i8) -> i8 { @@ -283,37 +297,49 @@ block0(v0: i16, v1: i128, v2: i128): } ; VCode: +; add sp,-16 +; sd ra,8(sp) +; sd fp,0(sp) +; mv fp,sp +; sd s8,-8(sp) +; add sp,-16 ; 
block0: -; mv a7,a1 -; li a5,42 -; slli a0,a0,48 -; srli a0,a0,48 -; slli a5,a5,48 -; srli a5,a5,48 -; eq a0,a0,a5##ty=i16 -; andi a5,a0,255 -; select_i128 [a0,a1],[a7,a2],[a3,a4]##condition=a5 +; mv s8,a1 +; addi a5,a0,-42 +; seqz a5,a5 +; andi a5,a5,255 +; select_i128 [a0,a1],[s8,a2],[a3,a4]##condition=a5 +; add sp,+16 +; ld s8,-8(sp) +; ld ra,8(sp) +; ld fp,0(sp) +; add sp,+16 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; mv a7, a1 -; addi a5, zero, 0x2a -; slli a0, a0, 0x30 -; srli a0, a0, 0x30 -; slli a5, a5, 0x30 -; srli a5, a5, 0x30 -; bne a0, a5, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a5, a0, 0xff +; addi sp, sp, -0x10 +; sd ra, 8(sp) +; sd s0, 0(sp) +; mv s0, sp +; sd s8, -8(sp) +; addi sp, sp, -0x10 +; block1: ; offset 0x18 +; mv s8, a1 +; addi a5, a0, -0x2a +; seqz a5, a5 +; andi a5, a5, 0xff ; beqz a5, 0x10 -; mv a0, a7 +; mv a0, s8 ; mv a1, a2 ; j 0xc ; mv a0, a3 ; mv a1, a4 +; addi sp, sp, 0x10 +; ld s8, -8(sp) +; ld ra, 8(sp) +; ld s0, 0(sp) +; addi sp, sp, 0x10 ; ret function %select_icmp_i32_i8(i32, i8, i8) -> i8 { @@ -449,37 +475,49 @@ block0(v0: i32, v1: i128, v2: i128): } ; VCode: +; add sp,-16 +; sd ra,8(sp) +; sd fp,0(sp) +; mv fp,sp +; sd s8,-8(sp) +; add sp,-16 ; block0: -; mv a7,a1 -; li a5,42 -; slli a0,a0,32 -; srli a0,a0,32 -; slli a5,a5,32 -; srli a5,a5,32 -; eq a0,a0,a5##ty=i32 -; andi a5,a0,255 -; select_i128 [a0,a1],[a7,a2],[a3,a4]##condition=a5 +; mv s8,a1 +; addi a5,a0,-42 +; seqz a5,a5 +; andi a5,a5,255 +; select_i128 [a0,a1],[s8,a2],[a3,a4]##condition=a5 +; add sp,+16 +; ld s8,-8(sp) +; ld ra,8(sp) +; ld fp,0(sp) +; add sp,+16 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; mv a7, a1 -; addi a5, zero, 0x2a -; slli a0, a0, 0x20 -; srli a0, a0, 0x20 -; slli a5, a5, 0x20 -; srli a5, a5, 0x20 -; bne a0, a5, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a5, a0, 0xff +; addi sp, sp, -0x10 +; sd ra, 8(sp) +; sd s0, 0(sp) +; mv s0, sp +; sd s8, -8(sp) +; addi sp, sp, -0x10 +; block1: ; offset 0x18 +; mv s8, 
a1 +; addi a5, a0, -0x2a +; seqz a5, a5 +; andi a5, a5, 0xff ; beqz a5, 0x10 -; mv a0, a7 +; mv a0, s8 ; mv a1, a2 ; j 0xc ; mv a0, a3 ; mv a1, a4 +; addi sp, sp, 0x10 +; ld s8, -8(sp) +; ld ra, 8(sp) +; ld s0, 0(sp) +; addi sp, sp, 0x10 ; ret function %select_icmp_i64_i8(i64, i8, i8) -> i8 { @@ -591,8 +629,8 @@ block0(v0: i64, v1: i128, v2: i128): ; add sp,-16 ; block0: ; mv s8,a1 -; li a5,42 -; eq a5,a0,a5##ty=i64 +; addi a5,a0,-42 +; seqz a5,a5 ; andi a5,a5,255 ; select_i128 [a0,a1],[s8,a2],[a3,a4]##condition=a5 ; add sp,+16 @@ -612,11 +650,8 @@ block0(v0: i64, v1: i128, v2: i128): ; addi sp, sp, -0x10 ; block1: ; offset 0x18 ; mv s8, a1 -; addi a5, zero, 0x2a -; bne a0, a5, 0xc -; addi a5, zero, 1 -; j 8 -; mv a5, zero +; addi a5, a0, -0x2a +; seqz a5, a5 ; andi a5, a5, 0xff ; beqz a5, 0x10 ; mv a0, s8 diff --git a/cranelift/filetests/filetests/isa/riscv64/select_spectre_guard.clif b/cranelift/filetests/filetests/isa/riscv64/select_spectre_guard.clif index dec1cc69cfd1..a30c661c8388 100644 --- a/cranelift/filetests/filetests/isa/riscv64/select_spectre_guard.clif +++ b/cranelift/filetests/filetests/isa/riscv64/select_spectre_guard.clif @@ -12,35 +12,28 @@ block0(v0: i8, v1: i8, v2: i8): ; VCode: ; block0: -; li a3,42 -; andi a0,a0,255 -; andi a3,a3,255 -; eq a4,a0,a3##ty=i8 -; andi a3,a4,255 -; sltu a4,zero,a3 -; sub a0,zero,a4 -; and a3,a1,a0 -; not a4,a0 -; and a0,a2,a4 -; or a0,a3,a0 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; andi a0, a0, 0xff -; andi a3, a3, 0xff -; bne a0, a3, 0xc -; addi a4, zero, 1 -; j 8 -; mv a4, zero -; andi a3, a4, 0xff -; snez a4, a3 -; neg a0, a4 -; and a3, a1, a0 -; not a4, a0 -; and a0, a2, a4 -; or a0, a3, a0 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, 
a0, a4 ; ret function %f(i8, i16, i16) -> i16 { @@ -53,35 +46,28 @@ block0(v0: i8, v1: i16, v2: i16): ; VCode: ; block0: -; li a3,42 -; andi a0,a0,255 -; andi a3,a3,255 -; eq a4,a0,a3##ty=i8 -; andi a3,a4,255 -; sltu a4,zero,a3 -; sub a0,zero,a4 -; and a3,a1,a0 -; not a4,a0 -; and a0,a2,a4 -; or a0,a3,a0 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; andi a0, a0, 0xff -; andi a3, a3, 0xff -; bne a0, a3, 0xc -; addi a4, zero, 1 -; j 8 -; mv a4, zero -; andi a3, a4, 0xff -; snez a4, a3 -; neg a0, a4 -; and a3, a1, a0 -; not a4, a0 -; and a0, a2, a4 -; or a0, a3, a0 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i8, i32, i32) -> i32 { @@ -94,35 +80,28 @@ block0(v0: i8, v1: i32, v2: i32): ; VCode: ; block0: -; li a3,42 -; andi a0,a0,255 -; andi a3,a3,255 -; eq a4,a0,a3##ty=i8 -; andi a3,a4,255 -; sltu a4,zero,a3 -; sub a0,zero,a4 -; and a3,a1,a0 -; not a4,a0 -; and a0,a2,a4 -; or a0,a3,a0 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; andi a0, a0, 0xff -; andi a3, a3, 0xff -; bne a0, a3, 0xc -; addi a4, zero, 1 -; j 8 -; mv a4, zero -; andi a3, a4, 0xff -; snez a4, a3 -; neg a0, a4 -; and a3, a1, a0 -; not a4, a0 -; and a0, a2, a4 -; or a0, a3, a0 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i8, i64, i64) -> i64 { @@ -135,35 +114,28 @@ block0(v0: i8, v1: i64, v2: i64): ; VCode: ; block0: -; li a3,42 -; andi a0,a0,255 -; andi a3,a3,255 -; eq a4,a0,a3##ty=i8 -; andi a3,a4,255 -; sltu 
a4,zero,a3 -; sub a0,zero,a4 -; and a3,a1,a0 -; not a4,a0 -; and a0,a2,a4 -; or a0,a3,a0 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; andi a0, a0, 0xff -; andi a3, a3, 0xff -; bne a0, a3, 0xc -; addi a4, zero, 1 -; j 8 -; mv a4, zero -; andi a3, a4, 0xff -; snez a4, a3 -; neg a0, a4 -; and a3, a1, a0 -; not a4, a0 -; and a0, a2, a4 -; or a0, a3, a0 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i8, i128, i128) -> i128 { @@ -179,26 +151,24 @@ block0(v0: i8, v1: i128, v2: i128): ; sd ra,8(sp) ; sd fp,0(sp) ; mv fp,sp -; sd s8,-8(sp) +; sd s6,-8(sp) ; add sp,-16 ; block0: -; li a5,42 -; andi a0,a0,255 +; addi a5,a0,-42 +; seqz a5,a5 ; andi a5,a5,255 -; eq a5,a0,a5##ty=i8 -; andi a5,a5,255 -; sltu a0,zero,a5 -; sub s8,zero,a0 -; and a5,a1,s8 -; and a1,a2,s8 -; not a0,s8 -; not a2,s8 -; and a0,a3,a0 -; and a2,a4,a2 -; or a0,a5,a0 -; or a1,a1,a2 +; sltu a5,zero,a5 +; sub s6,zero,a5 +; and a0,a1,s6 +; and a5,a2,s6 +; not a2,s6 +; not a1,s6 +; and a2,a3,a2 +; and a1,a4,a1 +; or a0,a0,a2 +; or a1,a5,a1 ; add sp,+16 -; ld s8,-8(sp) +; ld s6,-8(sp) ; ld ra,8(sp) ; ld fp,0(sp) ; add sp,+16 @@ -210,29 +180,24 @@ block0(v0: i8, v1: i128, v2: i128): ; sd ra, 8(sp) ; sd s0, 0(sp) ; mv s0, sp -; sd s8, -8(sp) +; sd s6, -8(sp) ; addi sp, sp, -0x10 ; block1: ; offset 0x18 -; addi a5, zero, 0x2a -; andi a0, a0, 0xff -; andi a5, a5, 0xff -; bne a0, a5, 0xc -; addi a5, zero, 1 -; j 8 -; mv a5, zero +; addi a5, a0, -0x2a +; seqz a5, a5 ; andi a5, a5, 0xff -; snez a0, a5 -; neg s8, a0 -; and a5, a1, s8 -; and a1, a2, s8 -; not a0, s8 -; not a2, s8 -; and a0, a3, a0 -; and a2, a4, a2 -; or a0, a5, a0 -; or a1, a1, a2 +; snez a5, a5 +; neg s6, a5 +; and a0, a1, s6 +; and a5, a2, s6 +; not a2, s6 
+; not a1, s6 +; and a2, a3, a2 +; and a1, a4, a1 +; or a0, a0, a2 +; or a1, a5, a1 ; addi sp, sp, 0x10 -; ld s8, -8(sp) +; ld s6, -8(sp) ; ld ra, 8(sp) ; ld s0, 0(sp) ; addi sp, sp, 0x10 @@ -248,39 +213,28 @@ block0(v0: i16, v1: i8, v2: i8): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,48 -; srli a3,a0,48 -; slli a4,a4,48 -; srli a0,a4,48 -; eq a3,a3,a0##ty=i16 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x30 -; srli a3, a0, 0x30 -; slli a4, a4, 0x30 -; srli a0, a4, 0x30 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i16, i16, i16) -> i16 { @@ -293,39 +247,28 @@ block0(v0: i16, v1: i16, v2: i16): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,48 -; srli a3,a0,48 -; slli a4,a4,48 -; srli a0,a4,48 -; eq a3,a3,a0##ty=i16 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x30 -; srli a3, a0, 0x30 -; slli a4, a4, 0x30 -; srli a0, a4, 0x30 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, 
a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i16, i32, i32) -> i32 { @@ -338,39 +281,28 @@ block0(v0: i16, v1: i32, v2: i32): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,48 -; srli a3,a0,48 -; slli a4,a4,48 -; srli a0,a4,48 -; eq a3,a3,a0##ty=i16 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x30 -; srli a3, a0, 0x30 -; slli a4, a4, 0x30 -; srli a0, a4, 0x30 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i16, i64, i64) -> i64 { @@ -383,39 +315,28 @@ block0(v0: i16, v1: i64, v2: i64): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,48 -; srli a3,a0,48 -; slli a4,a4,48 -; srli a0,a4,48 -; eq a3,a3,a0##ty=i16 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x30 -; srli a3, a0, 0x30 -; slli a4, a4, 0x30 -; srli a0, a4, 0x30 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, 
a2, a3 +; or a0, a0, a4 ; ret function %f(i16, i128, i128) -> i128 { @@ -427,48 +348,60 @@ block0(v0: i16, v1: i128, v2: i128): } ; VCode: +; add sp,-16 +; sd ra,8(sp) +; sd fp,0(sp) +; mv fp,sp +; sd s6,-8(sp) +; add sp,-16 ; block0: -; li a5,42 -; slli a0,a0,48 -; srli a0,a0,48 -; slli a5,a5,48 -; srli a5,a5,48 -; eq a5,a0,a5##ty=i16 -; andi a0,a5,255 -; sltu a5,zero,a0 -; sub a5,zero,a5 -; and a0,a1,a5 -; and a2,a2,a5 -; not a7,a5 -; not a1,a5 -; and a3,a3,a7 -; and a4,a4,a1 -; or a0,a0,a3 -; or a1,a2,a4 +; addi a5,a0,-42 +; seqz a5,a5 +; andi a5,a5,255 +; sltu a5,zero,a5 +; sub s6,zero,a5 +; and a0,a1,s6 +; and a5,a2,s6 +; not a2,s6 +; not a1,s6 +; and a2,a3,a2 +; and a1,a4,a1 +; or a0,a0,a2 +; or a1,a5,a1 +; add sp,+16 +; ld s6,-8(sp) +; ld ra,8(sp) +; ld fp,0(sp) +; add sp,+16 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a5, zero, 0x2a -; slli a0, a0, 0x30 -; srli a0, a0, 0x30 -; slli a5, a5, 0x30 -; srli a5, a5, 0x30 -; bne a0, a5, 0xc -; addi a5, zero, 1 -; j 8 -; mv a5, zero -; andi a0, a5, 0xff -; snez a5, a0 -; neg a5, a5 -; and a0, a1, a5 -; and a2, a2, a5 -; not a7, a5 -; not a1, a5 -; and a3, a3, a7 -; and a4, a4, a1 -; or a0, a0, a3 -; or a1, a2, a4 +; addi sp, sp, -0x10 +; sd ra, 8(sp) +; sd s0, 0(sp) +; mv s0, sp +; sd s6, -8(sp) +; addi sp, sp, -0x10 +; block1: ; offset 0x18 +; addi a5, a0, -0x2a +; seqz a5, a5 +; andi a5, a5, 0xff +; snez a5, a5 +; neg s6, a5 +; and a0, a1, s6 +; and a5, a2, s6 +; not a2, s6 +; not a1, s6 +; and a2, a3, a2 +; and a1, a4, a1 +; or a0, a0, a2 +; or a1, a5, a1 +; addi sp, sp, 0x10 +; ld s6, -8(sp) +; ld ra, 8(sp) +; ld s0, 0(sp) +; addi sp, sp, 0x10 ; ret function %f(i32, i8, i8) -> i8 { @@ -481,39 +414,28 @@ block0(v0: i32, v1: i8, v2: i8): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,32 -; srli a3,a0,32 -; slli a4,a4,32 -; srli a0,a4,32 -; eq a3,a3,a0##ty=i32 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 
+; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x20 -; srli a3, a0, 0x20 -; slli a4, a4, 0x20 -; srli a0, a4, 0x20 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i32, i16, i16) -> i16 { @@ -526,39 +448,28 @@ block0(v0: i32, v1: i16, v2: i16): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,32 -; srli a3,a0,32 -; slli a4,a4,32 -; srli a0,a4,32 -; eq a3,a3,a0##ty=i32 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x20 -; srli a3, a0, 0x20 -; slli a4, a4, 0x20 -; srli a0, a4, 0x20 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i32, i32, i32) -> i32 { @@ -571,39 +482,28 @@ block0(v0: i32, v1: i32, v2: i32): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,32 -; srli a3,a0,32 -; slli a4,a4,32 -; srli a0,a4,32 -; eq a3,a3,a0##ty=i32 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and 
a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x20 -; srli a3, a0, 0x20 -; slli a4, a4, 0x20 -; srli a0, a4, 0x20 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i32, i64, i64) -> i64 { @@ -616,39 +516,28 @@ block0(v0: i32, v1: i64, v2: i64): ; VCode: ; block0: -; li a4,42 -; slli a0,a0,32 -; srli a3,a0,32 -; slli a4,a4,32 -; srli a0,a4,32 -; eq a3,a3,a0##ty=i32 -; andi a4,a3,255 -; sltu a0,zero,a4 -; sub a3,zero,a0 -; and a4,a1,a3 -; not a0,a3 -; and a2,a2,a0 -; or a0,a4,a2 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 +; sltu a3,zero,a0 +; sub a4,zero,a3 +; and a0,a1,a4 +; not a3,a4 +; and a4,a2,a3 +; or a0,a0,a4 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a4, zero, 0x2a -; slli a0, a0, 0x20 -; srli a3, a0, 0x20 -; slli a4, a4, 0x20 -; srli a0, a4, 0x20 -; bne a3, a0, 0xc -; addi a3, zero, 1 -; j 8 -; mv a3, zero -; andi a4, a3, 0xff -; snez a0, a4 -; neg a3, a0 -; and a4, a1, a3 -; not a0, a3 -; and a2, a2, a0 -; or a0, a4, a2 +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff +; snez a3, a0 +; neg a4, a3 +; and a0, a1, a4 +; not a3, a4 +; and a4, a2, a3 +; or a0, a0, a4 ; ret function %f(i32, i128, i128) -> i128 { @@ -660,48 +549,60 @@ block0(v0: i32, v1: i128, v2: i128): } ; VCode: +; add sp,-16 +; sd ra,8(sp) +; sd fp,0(sp) +; mv fp,sp +; sd s6,-8(sp) +; add sp,-16 ; block0: -; li a5,42 -; slli a0,a0,32 -; srli a0,a0,32 -; slli a5,a5,32 -; srli a5,a5,32 -; eq a5,a0,a5##ty=i32 -; andi a0,a5,255 -; sltu a5,zero,a0 -; sub a5,zero,a5 -; and a0,a1,a5 -; and a2,a2,a5 -; not a7,a5 -; not a1,a5 -; and a3,a3,a7 -; and a4,a4,a1 -; or a0,a0,a3 -; or a1,a2,a4 +; 
addi a5,a0,-42 +; seqz a5,a5 +; andi a5,a5,255 +; sltu a5,zero,a5 +; sub s6,zero,a5 +; and a0,a1,s6 +; and a5,a2,s6 +; not a2,s6 +; not a1,s6 +; and a2,a3,a2 +; and a1,a4,a1 +; or a0,a0,a2 +; or a1,a5,a1 +; add sp,+16 +; ld s6,-8(sp) +; ld ra,8(sp) +; ld fp,0(sp) +; add sp,+16 ; ret ; ; Disassembled: ; block0: ; offset 0x0 -; addi a5, zero, 0x2a -; slli a0, a0, 0x20 -; srli a0, a0, 0x20 -; slli a5, a5, 0x20 -; srli a5, a5, 0x20 -; bne a0, a5, 0xc -; addi a5, zero, 1 -; j 8 -; mv a5, zero -; andi a0, a5, 0xff -; snez a5, a0 -; neg a5, a5 -; and a0, a1, a5 -; and a2, a2, a5 -; not a7, a5 -; not a1, a5 -; and a3, a3, a7 -; and a4, a4, a1 -; or a0, a0, a3 -; or a1, a2, a4 +; addi sp, sp, -0x10 +; sd ra, 8(sp) +; sd s0, 0(sp) +; mv s0, sp +; sd s6, -8(sp) +; addi sp, sp, -0x10 +; block1: ; offset 0x18 +; addi a5, a0, -0x2a +; seqz a5, a5 +; andi a5, a5, 0xff +; snez a5, a5 +; neg s6, a5 +; and a0, a1, s6 +; and a5, a2, s6 +; not a2, s6 +; not a1, s6 +; and a2, a3, a2 +; and a1, a4, a1 +; or a0, a0, a2 +; or a1, a5, a1 +; addi sp, sp, 0x10 +; ld s6, -8(sp) +; ld ra, 8(sp) +; ld s0, 0(sp) +; addi sp, sp, 0x10 ; ret function %f(i64, i8, i8) -> i8 { @@ -714,9 +615,9 @@ block0(v0: i64, v1: i8, v2: i8): ; VCode: ; block0: -; li a3,42 -; eq a0,a0,a3##ty=i64 -; andi a0,a0,255 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 ; sltu a3,zero,a0 ; sub a4,zero,a3 ; and a0,a1,a4 @@ -727,12 +628,9 @@ block0(v0: i64, v1: i8, v2: i8): ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; bne a0, a3, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a0, a0, 0xff +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff ; snez a3, a0 ; neg a4, a3 ; and a0, a1, a4 @@ -751,9 +649,9 @@ block0(v0: i64, v1: i16, v2: i16): ; VCode: ; block0: -; li a3,42 -; eq a0,a0,a3##ty=i64 -; andi a0,a0,255 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 ; sltu a3,zero,a0 ; sub a4,zero,a3 ; and a0,a1,a4 @@ -764,12 +662,9 @@ block0(v0: i64, v1: i16, v2: i16): ; ; Disassembled: ; block0: ; 
offset 0x0 -; addi a3, zero, 0x2a -; bne a0, a3, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a0, a0, 0xff +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff ; snez a3, a0 ; neg a4, a3 ; and a0, a1, a4 @@ -788,9 +683,9 @@ block0(v0: i64, v1: i32, v2: i32): ; VCode: ; block0: -; li a3,42 -; eq a0,a0,a3##ty=i64 -; andi a0,a0,255 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 ; sltu a3,zero,a0 ; sub a4,zero,a3 ; and a0,a1,a4 @@ -801,12 +696,9 @@ block0(v0: i64, v1: i32, v2: i32): ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; bne a0, a3, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a0, a0, 0xff +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff ; snez a3, a0 ; neg a4, a3 ; and a0, a1, a4 @@ -825,9 +717,9 @@ block0(v0: i64, v1: i64, v2: i64): ; VCode: ; block0: -; li a3,42 -; eq a0,a0,a3##ty=i64 -; andi a0,a0,255 +; addi a5,a0,-42 +; seqz a3,a5 +; andi a0,a3,255 ; sltu a3,zero,a0 ; sub a4,zero,a3 ; and a0,a1,a4 @@ -838,12 +730,9 @@ block0(v0: i64, v1: i64, v2: i64): ; ; Disassembled: ; block0: ; offset 0x0 -; addi a3, zero, 0x2a -; bne a0, a3, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a0, a0, 0xff +; addi a5, a0, -0x2a +; seqz a3, a5 +; andi a0, a3, 0xff ; snez a3, a0 ; neg a4, a3 ; and a0, a1, a4 @@ -868,9 +757,9 @@ block0(v0: i64, v1: i128, v2: i128): ; sd s6,-8(sp) ; add sp,-16 ; block0: -; li a5,42 -; eq a0,a0,a5##ty=i64 -; andi a5,a0,255 +; addi a5,a0,-42 +; seqz a5,a5 +; andi a5,a5,255 ; sltu a5,zero,a5 ; sub s6,zero,a5 ; and a0,a1,s6 @@ -897,12 +786,9 @@ block0(v0: i64, v1: i128, v2: i128): ; sd s6, -8(sp) ; addi sp, sp, -0x10 ; block1: ; offset 0x18 -; addi a5, zero, 0x2a -; bne a0, a5, 0xc -; addi a0, zero, 1 -; j 8 -; mv a0, zero -; andi a5, a0, 0xff +; addi a5, a0, -0x2a +; seqz a5, a5 +; andi a5, a5, 0xff ; snez a5, a5 ; neg s6, a5 ; and a0, a1, s6 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat 
b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat index cb4a70a8404b..7697271dbb11 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat @@ -45,7 +45,7 @@ ;; srli a3,a5,32 ;; ld a0,8(a2) ;; addi a0,a0,-4 -;; ugt a4,a3,a0##ty=i64 +;; sltu a4,a0,a3 ;; ld a0,0(a2) ;; add a0,a0,a3 ;; li a2,0 @@ -67,7 +67,7 @@ ;; srli a2,a5,32 ;; ld a0,8(a1) ;; addi a0,a0,-4 -;; ugt a3,a2,a0##ty=i64 +;; sltu a3,a0,a2 ;; ld a0,0(a1) ;; add a0,a0,a2 ;; li a1,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat index 7beb856e6ac4..a4a57c0706bc 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -47,7 +47,7 @@ ;; lui a3,-1 ;; addi a0,a3,-4 ;; add a4,a4,a0 -;; ugt a0,a5,a4##ty=i64 +;; sltu a0,a4,a5 ;; ld a4,0(a2) ;; add a4,a4,a5 ;; lui a5,1 @@ -73,7 +73,7 @@ ;; lui a3,-1 ;; addi a0,a3,-4 ;; add a4,a4,a0 -;; ugt a0,a5,a4##ty=i64 +;; sltu a0,a4,a5 ;; ld a4,0(a1) ;; add a4,a4,a5 ;; lui a5,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat index 22d4a1643b34..1b6abfbeeae8 100644 --- 
a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat @@ -48,7 +48,7 @@ ;; ult a0,a3,a5##ty=i64 ;; trap_if heap_oob##(a0 ne zero) ;; ld a0,8(a2) -;; ugt a0,a3,a0##ty=i64 +;; sltu a0,a0,a3 ;; ld a2,0(a2) ;; add a5,a2,a5 ;; ld a2,[const(0)] @@ -75,7 +75,7 @@ ;; ult a0,a3,a5##ty=i64 ;; trap_if heap_oob##(a0 ne zero) ;; ld a0,8(a1) -;; ugt a0,a3,a0##ty=i64 +;; sltu a0,a0,a3 ;; ld a1,0(a1) ;; add a5,a1,a5 ;; ld a1,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat index 549c75ada61b..254d9f3ec5c6 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat @@ -41,42 +41,44 @@ ;; function u0:0: ;; block0: -;; slli a4,a0,32 -;; srli a0,a4,32 -;; ld a5,8(a2) -;; uge a3,a0,a5##ty=i64 -;; ld a5,0(a2) -;; add a5,a5,a0 -;; li a0,0 -;; andi a2,a3,255 -;; sltu a2,zero,a2 -;; sub a3,zero,a2 -;; and a0,a0,a3 -;; not a2,a3 -;; and a3,a5,a2 -;; or a5,a0,a3 -;; sb a1,0(a5) +;; slli a5,a0,32 +;; srli a3,a5,32 +;; ld a0,8(a2) +;; sltu a5,a3,a0 +;; xori a4,a5,1 +;; ld a0,0(a2) +;; add a0,a0,a3 +;; li a2,0 +;; andi a3,a4,255 +;; sltu a3,zero,a3 +;; sub a4,zero,a3 +;; and a2,a2,a4 +;; not a3,a4 +;; and a4,a0,a3 +;; or a0,a2,a4 +;; sb a1,0(a0) ;; j label1 ;; block1: ;; ret ;; ;; function u0:1: ;; block0: -;; slli a4,a0,32 -;; srli a0,a4,32 -;; ld a5,8(a1) -;; uge a2,a0,a5##ty=i64 -;; ld a5,0(a1) -;; add a5,a5,a0 -;; li a0,0 -;; andi a1,a2,255 -;; sltu 
a1,zero,a1 -;; sub a3,zero,a1 -;; and a0,a0,a3 -;; not a1,a3 -;; and a3,a5,a1 -;; or a5,a0,a3 -;; lbu a0,0(a5) +;; slli a5,a0,32 +;; srli a2,a5,32 +;; ld a0,8(a1) +;; sltu a5,a2,a0 +;; xori a3,a5,1 +;; ld a0,0(a1) +;; add a0,a0,a2 +;; li a1,0 +;; andi a2,a3,255 +;; sltu a2,zero,a2 +;; sub a4,zero,a2 +;; and a1,a1,a4 +;; not a2,a4 +;; and a4,a0,a2 +;; or a0,a1,a4 +;; lbu a0,0(a0) ;; j label1 ;; block1: ;; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat index f5e443f3788f..ce48eeec9e18 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -47,7 +47,7 @@ ;; lui a3,-1 ;; addi a0,a3,-1 ;; add a4,a4,a0 -;; ugt a0,a5,a4##ty=i64 +;; sltu a0,a4,a5 ;; ld a4,0(a2) ;; add a4,a4,a5 ;; lui a5,1 @@ -73,7 +73,7 @@ ;; lui a3,-1 ;; addi a0,a3,-1 ;; add a4,a4,a0 -;; ugt a0,a5,a4##ty=i64 +;; sltu a0,a4,a5 ;; ld a4,0(a1) ;; add a4,a4,a5 ;; lui a5,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat index 4573556ee391..e50f75e4b6b5 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat @@ -48,7 +48,7 @@ ;; ult a0,a3,a5##ty=i64 ;; trap_if heap_oob##(a0 ne zero) ;; ld a0,8(a2) -;; ugt 
a0,a3,a0##ty=i64 +;; sltu a0,a0,a3 ;; ld a2,0(a2) ;; add a5,a2,a5 ;; ld a2,[const(0)] @@ -75,7 +75,7 @@ ;; ult a0,a3,a5##ty=i64 ;; trap_if heap_oob##(a0 ne zero) ;; ld a0,8(a1) -;; ugt a0,a3,a0##ty=i64 +;; sltu a0,a0,a3 ;; ld a1,0(a1) ;; add a5,a1,a5 ;; ld a1,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat index 2a551c45a516..408d2c1817b1 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat @@ -44,7 +44,7 @@ ;; slli a4,a0,32 ;; srli a0,a4,32 ;; ld a5,8(a2) -;; ugt a3,a0,a5##ty=i64 +;; sltu a3,a5,a0 ;; ld a5,0(a2) ;; add a5,a5,a0 ;; li a0,0 @@ -65,7 +65,7 @@ ;; slli a4,a0,32 ;; srli a0,a4,32 ;; ld a5,8(a1) -;; ugt a2,a0,a5##ty=i64 +;; sltu a2,a5,a0 ;; ld a5,0(a1) ;; add a5,a5,a0 ;; li a0,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat index 44d02911ef90..fab7ebf99f16 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -44,7 +44,7 @@ ;; slli a0,a0,32 ;; srli a4,a0,32 ;; ld a3,8(a2) -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a2) ;; add a2,a2,a4 ;; lui a4,1 @@ -67,7 +67,7 @@ ;; slli a0,a0,32 ;; srli 
a3,a0,32 ;; ld a2,8(a1) -;; ugt a2,a3,a2##ty=i64 +;; sltu a2,a2,a3 ;; ld a1,0(a1) ;; add a1,a1,a3 ;; lui a3,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat index b8a7173a9229..1209d633d085 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat @@ -44,7 +44,7 @@ ;; slli a0,a0,32 ;; srli a4,a0,32 ;; ld a3,8(a2) -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a2) ;; add a2,a2,a4 ;; ld a4,[const(0)] @@ -67,7 +67,7 @@ ;; slli a0,a0,32 ;; srli a3,a0,32 ;; ld a2,8(a1) -;; ugt a2,a3,a2##ty=i64 +;; sltu a2,a2,a3 ;; ld a1,0(a1) ;; add a1,a1,a3 ;; ld a3,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat index 5719472c2b23..85c149413c41 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat @@ -41,42 +41,44 @@ ;; function u0:0: ;; block0: -;; slli a4,a0,32 -;; srli a0,a4,32 -;; ld a5,8(a2) -;; uge a3,a0,a5##ty=i64 -;; ld a5,0(a2) -;; add a5,a5,a0 -;; li a0,0 -;; andi a2,a3,255 -;; sltu a2,zero,a2 -;; sub a3,zero,a2 -;; and a0,a0,a3 -;; not a2,a3 -;; and a3,a5,a2 -;; or a5,a0,a3 -;; sb a1,0(a5) +;; slli a5,a0,32 
+;; srli a3,a5,32 +;; ld a0,8(a2) +;; sltu a5,a3,a0 +;; xori a4,a5,1 +;; ld a0,0(a2) +;; add a0,a0,a3 +;; li a2,0 +;; andi a3,a4,255 +;; sltu a3,zero,a3 +;; sub a4,zero,a3 +;; and a2,a2,a4 +;; not a3,a4 +;; and a4,a0,a3 +;; or a0,a2,a4 +;; sb a1,0(a0) ;; j label1 ;; block1: ;; ret ;; ;; function u0:1: ;; block0: -;; slli a4,a0,32 -;; srli a0,a4,32 -;; ld a5,8(a1) -;; uge a2,a0,a5##ty=i64 -;; ld a5,0(a1) -;; add a5,a5,a0 -;; li a0,0 -;; andi a1,a2,255 -;; sltu a1,zero,a1 -;; sub a3,zero,a1 -;; and a0,a0,a3 -;; not a1,a3 -;; and a3,a5,a1 -;; or a5,a0,a3 -;; lbu a0,0(a5) +;; slli a5,a0,32 +;; srli a2,a5,32 +;; ld a0,8(a1) +;; sltu a5,a2,a0 +;; xori a3,a5,1 +;; ld a0,0(a1) +;; add a0,a0,a2 +;; li a1,0 +;; andi a2,a3,255 +;; sltu a2,zero,a2 +;; sub a4,zero,a2 +;; and a1,a1,a4 +;; not a2,a4 +;; and a4,a0,a2 +;; or a0,a1,a4 +;; lbu a0,0(a0) ;; j label1 ;; block1: ;; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat index f6d8b7c29431..6eb64d6df27c 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -44,7 +44,7 @@ ;; slli a0,a0,32 ;; srli a4,a0,32 ;; ld a3,8(a2) -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a2) ;; add a2,a2,a4 ;; lui a4,1 @@ -67,7 +67,7 @@ ;; slli a0,a0,32 ;; srli a3,a0,32 ;; ld a2,8(a1) -;; ugt a2,a3,a2##ty=i64 +;; sltu a2,a2,a3 ;; ld a1,0(a1) ;; add a1,a1,a3 ;; lui a3,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat 
b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat index eb087e87bd28..fd30beb064f0 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i32_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat @@ -44,7 +44,7 @@ ;; slli a0,a0,32 ;; srli a4,a0,32 ;; ld a3,8(a2) -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a2) ;; add a2,a2,a4 ;; ld a4,[const(0)] @@ -67,7 +67,7 @@ ;; slli a0,a0,32 ;; srli a3,a0,32 ;; ld a2,8(a1) -;; ugt a2,a3,a2##ty=i64 +;; sltu a2,a2,a3 ;; ld a1,0(a1) ;; add a1,a1,a3 ;; ld a3,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat index d139bc455b33..97d78a5b9961 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat @@ -43,7 +43,7 @@ ;; block0: ;; ld a4,8(a2) ;; addi a4,a4,-4 -;; ugt a5,a0,a4##ty=i64 +;; sltu a5,a4,a0 ;; ld a4,0(a2) ;; add a4,a4,a0 ;; li a0,0 @@ -63,7 +63,7 @@ ;; block0: ;; ld a4,8(a1) ;; addi a4,a4,-4 -;; ugt a5,a0,a4##ty=i64 +;; sltu a5,a4,a0 ;; ld a4,0(a1) ;; add a4,a4,a0 ;; li a0,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat index ef78d4658201..0d59a3a947cd 100644 --- 
a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -45,7 +45,7 @@ ;; lui a4,-1 ;; addi a4,a4,-4 ;; add a3,a3,a4 -;; ugt a3,a0,a3##ty=i64 +;; sltu a3,a3,a0 ;; ld a2,0(a2) ;; add a2,a2,a0 ;; lui a4,1 @@ -69,7 +69,7 @@ ;; lui a3,-1 ;; addi a3,a3,-4 ;; add a2,a2,a3 -;; ugt a3,a0,a2##ty=i64 +;; sltu a3,a2,a0 ;; ld a2,0(a1) ;; add a2,a2,a0 ;; lui a4,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat index d27e29e4b2f9..2d28e2c8bf58 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i32_access_0xffff0000_offset.wat @@ -46,7 +46,7 @@ ;; ult a4,a3,a0##ty=i64 ;; trap_if heap_oob##(a4 ne zero) ;; ld a4,8(a2) -;; ugt a4,a3,a4##ty=i64 +;; sltu a4,a4,a3 ;; ld a3,0(a2) ;; add a3,a3,a0 ;; ld a5,[const(0)] @@ -71,7 +71,7 @@ ;; ult a3,a2,a0##ty=i64 ;; trap_if heap_oob##(a3 ne zero) ;; ld a3,8(a1) -;; ugt a4,a2,a3##ty=i64 +;; sltu a4,a3,a2 ;; ld a3,0(a1) ;; add a3,a3,a0 ;; ld a5,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat index 5a3bc080b7db..dc8388c0bc8b 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat +++ 
b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat @@ -41,38 +41,40 @@ ;; function u0:0: ;; block0: -;; ld a3,8(a2) -;; uge a4,a0,a3##ty=i64 -;; ld a3,0(a2) -;; add a3,a3,a0 -;; li a5,0 -;; andi a4,a4,255 -;; sltu a0,zero,a4 -;; sub a2,zero,a0 -;; and a4,a5,a2 -;; not a5,a2 -;; and a2,a3,a5 -;; or a3,a4,a2 -;; sb a1,0(a3) +;; ld a4,8(a2) +;; sltu a3,a0,a4 +;; xori a5,a3,1 +;; ld a4,0(a2) +;; add a4,a4,a0 +;; li a0,0 +;; andi a5,a5,255 +;; sltu a2,zero,a5 +;; sub a2,zero,a2 +;; and a5,a0,a2 +;; not a0,a2 +;; and a2,a4,a0 +;; or a4,a5,a2 +;; sb a1,0(a4) ;; j label1 ;; block1: ;; ret ;; ;; function u0:1: ;; block0: -;; ld a3,8(a1) -;; uge a4,a0,a3##ty=i64 -;; ld a3,0(a1) -;; add a3,a3,a0 -;; li a5,0 -;; andi a4,a4,255 -;; sltu a0,zero,a4 -;; sub a1,zero,a0 -;; and a4,a5,a1 -;; not a5,a1 -;; and a1,a3,a5 -;; or a3,a4,a1 -;; lbu a0,0(a3) +;; ld a4,8(a1) +;; sltu a3,a0,a4 +;; xori a5,a3,1 +;; ld a4,0(a1) +;; add a4,a4,a0 +;; li a0,0 +;; andi a5,a5,255 +;; sltu a1,zero,a5 +;; sub a2,zero,a1 +;; and a5,a0,a2 +;; not a0,a2 +;; and a2,a4,a0 +;; or a4,a5,a2 +;; lbu a0,0(a4) ;; j label1 ;; block1: ;; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat index 422e1bc1c3de..d1ed25dd2aa6 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -45,7 +45,7 @@ ;; lui a4,-1 ;; addi a4,a4,-1 ;; add a3,a3,a4 -;; ugt a3,a0,a3##ty=i64 +;; sltu a3,a3,a0 ;; ld a2,0(a2) ;; add a2,a2,a0 ;; lui a4,1 @@ -69,7 +69,7 @@ ;; lui a3,-1 ;; addi a3,a3,-1 ;; add a2,a2,a3 -;; ugt 
a3,a0,a2##ty=i64 +;; sltu a3,a2,a0 ;; ld a2,0(a1) ;; add a2,a2,a0 ;; lui a4,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat index f51b54592618..121d55666f6b 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0_guard_yes_spectre_i8_access_0xffff0000_offset.wat @@ -46,7 +46,7 @@ ;; ult a4,a3,a0##ty=i64 ;; trap_if heap_oob##(a4 ne zero) ;; ld a4,8(a2) -;; ugt a4,a3,a4##ty=i64 +;; sltu a4,a4,a3 ;; ld a3,0(a2) ;; add a3,a3,a0 ;; ld a5,[const(0)] @@ -71,7 +71,7 @@ ;; ult a3,a2,a0##ty=i64 ;; trap_if heap_oob##(a3 ne zero) ;; ld a3,8(a1) -;; ugt a4,a2,a3##ty=i64 +;; sltu a4,a3,a2 ;; ld a3,0(a1) ;; add a3,a3,a0 ;; ld a5,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat index a6f0dfe87df4..05220911977d 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat @@ -42,7 +42,7 @@ ;; function u0:0: ;; block0: ;; ld a3,8(a2) -;; ugt a4,a0,a3##ty=i64 +;; sltu a4,a3,a0 ;; ld a3,0(a2) ;; add a3,a3,a0 ;; li a5,0 @@ -61,7 +61,7 @@ ;; function u0:1: ;; block0: ;; ld a3,8(a1) -;; ugt a4,a0,a3##ty=i64 +;; sltu a4,a3,a0 ;; ld a3,0(a1) ;; add a3,a3,a0 ;; li a5,0 diff --git 
a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat index 3e23e55bfce9..08d506c51ccc 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -42,7 +42,7 @@ ;; function u0:0: ;; block0: ;; ld a5,8(a2) -;; ugt a3,a0,a5##ty=i64 +;; sltu a3,a5,a0 ;; ld a5,0(a2) ;; add a5,a5,a0 ;; lui a0,1 @@ -63,7 +63,7 @@ ;; function u0:1: ;; block0: ;; ld a5,8(a1) -;; ugt a2,a0,a5##ty=i64 +;; sltu a2,a5,a0 ;; ld a5,0(a1) ;; add a5,a5,a0 ;; lui a0,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat index 67f34728d8a5..7803459d4757 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0xffff0000_offset.wat @@ -42,7 +42,7 @@ ;; function u0:0: ;; block0: ;; ld a5,8(a2) -;; ugt a3,a0,a5##ty=i64 +;; sltu a3,a5,a0 ;; ld a5,0(a2) ;; add a5,a5,a0 ;; ld a0,[const(0)] @@ -63,7 +63,7 @@ ;; function u0:1: ;; block0: ;; ld a5,8(a1) -;; ugt a2,a0,a5##ty=i64 +;; sltu a2,a5,a0 ;; ld a5,0(a1) ;; add a5,a5,a0 ;; ld a0,[const(0)] diff --git 
a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat index c90e4738e5c4..e10e53315315 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat @@ -41,38 +41,40 @@ ;; function u0:0: ;; block0: -;; ld a3,8(a2) -;; uge a4,a0,a3##ty=i64 -;; ld a3,0(a2) -;; add a3,a3,a0 -;; li a5,0 -;; andi a4,a4,255 -;; sltu a0,zero,a4 -;; sub a2,zero,a0 -;; and a4,a5,a2 -;; not a5,a2 -;; and a2,a3,a5 -;; or a3,a4,a2 -;; sb a1,0(a3) +;; ld a4,8(a2) +;; sltu a3,a0,a4 +;; xori a5,a3,1 +;; ld a4,0(a2) +;; add a4,a4,a0 +;; li a0,0 +;; andi a5,a5,255 +;; sltu a2,zero,a5 +;; sub a2,zero,a2 +;; and a5,a0,a2 +;; not a0,a2 +;; and a2,a4,a0 +;; or a4,a5,a2 +;; sb a1,0(a4) ;; j label1 ;; block1: ;; ret ;; ;; function u0:1: ;; block0: -;; ld a3,8(a1) -;; uge a4,a0,a3##ty=i64 -;; ld a3,0(a1) -;; add a3,a3,a0 -;; li a5,0 -;; andi a4,a4,255 -;; sltu a0,zero,a4 -;; sub a1,zero,a0 -;; and a4,a5,a1 -;; not a5,a1 -;; and a1,a3,a5 -;; or a3,a4,a1 -;; lbu a0,0(a3) +;; ld a4,8(a1) +;; sltu a3,a0,a4 +;; xori a5,a3,1 +;; ld a4,0(a1) +;; add a4,a4,a0 +;; li a0,0 +;; andi a5,a5,255 +;; sltu a1,zero,a5 +;; sub a2,zero,a1 +;; and a5,a0,a2 +;; not a0,a2 +;; and a2,a4,a0 +;; or a4,a5,a2 +;; lbu a0,0(a4) ;; j label1 ;; block1: ;; ret diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat index 74390da34be9..6993323965d9 100644 --- 
a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -42,7 +42,7 @@ ;; function u0:0: ;; block0: ;; ld a5,8(a2) -;; ugt a3,a0,a5##ty=i64 +;; sltu a3,a5,a0 ;; ld a5,0(a2) ;; add a5,a5,a0 ;; lui a0,1 @@ -63,7 +63,7 @@ ;; function u0:1: ;; block0: ;; ld a5,8(a1) -;; ugt a2,a0,a5##ty=i64 +;; sltu a2,a5,a0 ;; ld a5,0(a1) ;; add a5,a5,a0 ;; lui a0,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat index 0b9d9f0903fc..c09fcab131ef 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_dynamic_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0xffff0000_offset.wat @@ -42,7 +42,7 @@ ;; function u0:0: ;; block0: ;; ld a5,8(a2) -;; ugt a3,a0,a5##ty=i64 +;; sltu a3,a5,a0 ;; ld a5,0(a2) ;; add a5,a5,a0 ;; ld a0,[const(0)] @@ -63,7 +63,7 @@ ;; function u0:1: ;; block0: ;; ld a5,8(a1) -;; ugt a2,a0,a5##ty=i64 +;; sltu a2,a5,a0 ;; ld a5,0(a1) ;; add a5,a5,a0 ;; ld a0,[const(0)] diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat index f8f8f2ec4288..d49edbada56b 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat +++ 
b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0_offset.wat @@ -43,7 +43,7 @@ ;; srli a3,a5,32 ;; lui a5,65536 ;; addi a4,a5,-4 -;; ugt a4,a3,a4##ty=i64 +;; sltu a4,a4,a3 ;; ld a0,0(a2) ;; add a0,a0,a3 ;; li a2,0 @@ -65,7 +65,7 @@ ;; srli a2,a5,32 ;; lui a5,65536 ;; addi a3,a5,-4 -;; ugt a3,a2,a3##ty=i64 +;; sltu a3,a3,a2 ;; ld a0,0(a1) ;; add a0,a0,a2 ;; li a1,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat index ca129eb94eb9..b5adec5c7ab7 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -43,7 +43,7 @@ ;; srli a4,a3,32 ;; lui a3,65535 ;; addi a3,a3,-4 -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a2) ;; add a2,a2,a4 ;; lui a4,1 @@ -67,7 +67,7 @@ ;; srli a4,a2,32 ;; lui a2,65535 ;; addi a3,a2,-4 -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a1) ;; add a2,a2,a4 ;; lui a4,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat index e1532d5465fa..834ab52ef00f 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0_offset.wat @@ -43,7 +43,7 @@ ;; srli a3,a5,32 ;; lui a5,65536 ;; addi a4,a5,-1 -;; ugt a4,a3,a4##ty=i64 +;; sltu a4,a4,a3 ;; ld 
a0,0(a2) ;; add a0,a0,a3 ;; li a2,0 @@ -65,7 +65,7 @@ ;; srli a2,a5,32 ;; lui a5,65536 ;; addi a3,a5,-1 -;; ugt a3,a2,a3##ty=i64 +;; sltu a3,a3,a2 ;; ld a0,0(a1) ;; add a0,a0,a2 ;; li a1,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat index 2d09800de3a3..79e24e4fec1b 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i32_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -43,7 +43,7 @@ ;; srli a4,a3,32 ;; lui a3,65535 ;; addi a3,a3,-1 -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a2) ;; add a2,a2,a4 ;; lui a4,1 @@ -67,7 +67,7 @@ ;; srli a4,a2,32 ;; lui a2,65535 ;; addi a3,a2,-1 -;; ugt a3,a4,a3##ty=i64 +;; sltu a3,a3,a4 ;; ld a2,0(a1) ;; add a2,a2,a4 ;; lui a4,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat index d5a07ae68ecb..c7184eea40b8 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-4 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a2) ;; add a4,a4,a0 ;; li a0,0 @@ -61,7 +61,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-4 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a1) ;; add a4,a4,a0 ;; li a0,0 diff --git 
a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat index 0e16efed51ca..fabf4be30c2e 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a5,65535 ;; addi a3,a5,-4 -;; ugt a4,a0,a3##ty=i64 +;; sltu a4,a3,a0 ;; ld a2,0(a2) ;; add a0,a2,a0 ;; lui a2,1 @@ -63,7 +63,7 @@ ;; block0: ;; lui a5,65535 ;; addi a2,a5,-4 -;; ugt a3,a0,a2##ty=i64 +;; sltu a3,a2,a0 ;; ld a1,0(a1) ;; add a0,a1,a0 ;; lui a1,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat index b9d9b85d9634..d54903efd9ed 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-1 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a2) ;; add a4,a4,a0 ;; li a0,0 @@ -61,7 +61,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-1 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a1) ;; add a4,a4,a0 ;; li a0,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat index 
dd356dcb506b..fd9d885a7286 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a5,65535 ;; addi a3,a5,-1 -;; ugt a4,a0,a3##ty=i64 +;; sltu a4,a3,a0 ;; ld a2,0(a2) ;; add a0,a2,a0 ;; lui a2,1 @@ -63,7 +63,7 @@ ;; block0: ;; lui a5,65535 ;; addi a2,a5,-1 -;; ugt a3,a0,a2##ty=i64 +;; sltu a3,a2,a0 ;; ld a1,0(a1) ;; add a0,a1,a0 ;; lui a1,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat index 973ba5f4bdc1..76cd4432c8d6 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-4 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a2) ;; add a4,a4,a0 ;; li a0,0 @@ -61,7 +61,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-4 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a1) ;; add a4,a4,a0 ;; li a0,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat index d202e72373e8..76813dfb8f34 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat +++ 
b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i32_access_0x1000_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a5,65535 ;; addi a3,a5,-4 -;; ugt a4,a0,a3##ty=i64 +;; sltu a4,a3,a0 ;; ld a2,0(a2) ;; add a0,a2,a0 ;; lui a2,1 @@ -63,7 +63,7 @@ ;; block0: ;; lui a5,65535 ;; addi a2,a5,-4 -;; ugt a3,a0,a2##ty=i64 +;; sltu a3,a2,a0 ;; ld a1,0(a1) ;; add a0,a1,a0 ;; lui a1,1 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat index ee4599cd2566..0851ffed1504 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-1 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a2) ;; add a4,a4,a0 ;; li a0,0 @@ -61,7 +61,7 @@ ;; block0: ;; lui a3,65536 ;; addi a5,a3,-1 -;; ugt a5,a0,a5##ty=i64 +;; sltu a5,a5,a0 ;; ld a4,0(a1) ;; add a4,a4,a0 ;; li a0,0 diff --git a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat index ea258a38ff87..65c5f70bc03f 100644 --- a/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat +++ b/cranelift/filetests/filetests/isa/riscv64/wasm/load_store_static_kind_i64_index_0xffffffff_guard_yes_spectre_i8_access_0x1000_offset.wat @@ -41,7 +41,7 @@ ;; block0: ;; lui a5,65535 ;; addi 
a3,a5,-1 -;; ugt a4,a0,a3##ty=i64 +;; sltu a4,a3,a0 ;; ld a2,0(a2) ;; add a0,a2,a0 ;; lui a2,1 @@ -63,7 +63,7 @@ ;; block0: ;; lui a5,65535 ;; addi a2,a5,-1 -;; ugt a3,a0,a2##ty=i64 +;; sltu a3,a2,a0 ;; ld a1,0(a1) ;; add a0,a1,a0 ;; lui a1,1 diff --git a/cranelift/filetests/filetests/runtests/icmp-ne.clif b/cranelift/filetests/filetests/runtests/icmp-ne.clif index 2a8473cff86c..7e22b6dadb5c 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ne.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ne.clif @@ -41,3 +41,30 @@ block0(v0: i64, v1: i64): ; run: %icmp_ne_i64(0, 0) == 0 ; run: %icmp_ne_i64(1, 0) == 1 ; run: %icmp_ne_i64(-1, -1) == 0 + +function %icmp32_ne_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ne v0, 0 + return v2 +} +; run: %icmp32_ne_imm0(0) == 0 +; run: %icmp32_ne_imm0(1) == 1 +; run: %icmp32_ne_imm0(-1) == 1 + +function %icmp32_ne_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ne v0, 1 + return v2 +} +; run: %icmp32_ne_imm1(0) == 1 +; run: %icmp32_ne_imm1(1) == 0 +; run: %icmp32_ne_imm1(-1) == 1 + +function %icmp32_ne_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ne v0, -1 + return v2 +} +; run: %icmp32_ne_imm2(0) == 1 +; run: %icmp32_ne_imm2(1) == 1 +; run: %icmp32_ne_imm2(-1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-sge.clif b/cranelift/filetests/filetests/runtests/icmp-sge.clif index a96bc0bc8e4b..90159c998084 100644 --- a/cranelift/filetests/filetests/runtests/icmp-sge.clif +++ b/cranelift/filetests/filetests/runtests/icmp-sge.clif @@ -54,3 +54,30 @@ block0(v0: i64, v1: i64): ; run: %icmp_sge_i64(0, 1) == 0 ; run: %icmp_sge_i64(-5, -1) == 0 ; run: %icmp_sge_i64(1, -1) == 1 + +function %icmp32_sge_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sge v0, 0 + return v2 +} +; run: %icmp32_sge_imm0(0) == 1 +; run: %icmp32_sge_imm0(1) == 1 +; run: %icmp32_sge_imm0(-1) == 0 + +function %icmp32_sge_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sge v0, 1 + return v2 +} +; run: %icmp32_sge_imm1(0) 
== 0 +; run: %icmp32_sge_imm1(1) == 1 +; run: %icmp32_sge_imm1(-1) == 0 + +function %icmp32_sge_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sge v0, -1 + return v2 +} +; run: %icmp32_sge_imm2(0) == 1 +; run: %icmp32_sge_imm2(1) == 1 +; run: %icmp32_sge_imm2(-1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-sgt.clif b/cranelift/filetests/filetests/runtests/icmp-sgt.clif index 3763a21af079..9f2bb3648ae9 100644 --- a/cranelift/filetests/filetests/runtests/icmp-sgt.clif +++ b/cranelift/filetests/filetests/runtests/icmp-sgt.clif @@ -54,3 +54,31 @@ block0(v0: i64, v1: i64): ; run: %icmp_sgt_i64(0, 1) == 0 ; run: %icmp_sgt_i64(-5, -1) == 0 ; run: %icmp_sgt_i64(1, -1) == 1 + +function %icmp32_sgt_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sgt v0, 0 + return v2 +} +; run: %icmp32_sgt_imm0(0) == 0 +; run: %icmp32_sgt_imm0(1) == 1 +; run: %icmp32_sgt_imm0(-1) == 0 + +function %icmp32_sgt_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sgt v0, 1 + return v2 +} +; run: %icmp32_sgt_imm1(0) == 0 +; run: %icmp32_sgt_imm1(1) == 0 +; run: %icmp32_sgt_imm1(2) == 1 +; run: %icmp32_sgt_imm1(-1) == 0 + +function %icmp32_sgt_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sgt v0, -1 + return v2 +} +; run: %icmp32_sgt_imm2(0) == 1 +; run: %icmp32_sgt_imm2(1) == 1 +; run: %icmp32_sgt_imm2(-1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-sle.clif b/cranelift/filetests/filetests/runtests/icmp-sle.clif index 9a4b64daaa03..70495acc7ced 100644 --- a/cranelift/filetests/filetests/runtests/icmp-sle.clif +++ b/cranelift/filetests/filetests/runtests/icmp-sle.clif @@ -54,3 +54,31 @@ block0(v0: i64, v1: i64): ; run: %icmp_sle_i64(0, 1) == 1 ; run: %icmp_sle_i64(-5, -1) == 1 ; run: %icmp_sle_i64(1, -1) == 0 + +function %icmp32_sle_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sle v0, 0 + return v2 +} +; run: %icmp32_sle_imm0(0) == 1 +; run: %icmp32_sle_imm0(1) == 0 +; run: %icmp32_sle_imm0(-1) == 1 + +function %icmp32_sle_imm1(i32) -> i8 { 
+block0(v0: i32): + v2 = icmp_imm sle v0, 1 + return v2 +} +; run: %icmp32_sle_imm1(0) == 1 +; run: %icmp32_sle_imm1(1) == 1 +; run: %icmp32_sle_imm1(2) == 0 +; run: %icmp32_sle_imm1(-1) == 1 + +function %icmp32_sle_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm sle v0, -1 + return v2 +} +; run: %icmp32_sle_imm2(0) == 0 +; run: %icmp32_sle_imm2(1) == 0 +; run: %icmp32_sle_imm2(-1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-slt.clif b/cranelift/filetests/filetests/runtests/icmp-slt.clif index 02501b5b4305..36c02e05abb7 100644 --- a/cranelift/filetests/filetests/runtests/icmp-slt.clif +++ b/cranelift/filetests/filetests/runtests/icmp-slt.clif @@ -53,3 +53,31 @@ block0(v0: i64, v1: i64): ; run: %icmp_slt_i64(0, 1) == 1 ; run: %icmp_slt_i64(-5, -1) == 1 ; run: %icmp_slt_i64(1, -1) == 0 + +function %icmp32_slt_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm slt v0, 0 + return v2 +} +; run: %icmp32_slt_imm0(0) == 0 +; run: %icmp32_slt_imm0(1) == 0 +; run: %icmp32_slt_imm0(-1) == 1 + +function %icmp32_slt_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm slt v0, 1 + return v2 +} +; run: %icmp32_slt_imm1(0) == 1 +; run: %icmp32_slt_imm1(1) == 0 +; run: %icmp32_slt_imm1(-1) == 1 + +function %icmp32_slt_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm slt v0, -1 + return v2 +} +; run: %icmp32_slt_imm2(0) == 0 +; run: %icmp32_slt_imm2(1) == 0 +; run: %icmp32_slt_imm2(-1) == 0 +; run: %icmp32_slt_imm2(-2) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-uge.clif b/cranelift/filetests/filetests/runtests/icmp-uge.clif index 72ef20767780..7e4adb4e6ae3 100644 --- a/cranelift/filetests/filetests/runtests/icmp-uge.clif +++ b/cranelift/filetests/filetests/runtests/icmp-uge.clif @@ -53,3 +53,29 @@ block0(v0: i64, v1: i64): ; run: %icmp_uge_i64(0, 1) == 0 ; run: %icmp_uge_i64(-5, -1) == 0 ; run: %icmp_uge_i64(1, -1) == 0 + +function %icmp32_uge_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm uge v0, 0 + return v2 +} +; run: 
%icmp32_uge_imm0(0) == 1 +; run: %icmp32_uge_imm0(1) == 1 + +function %icmp32_uge_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm uge v0, 1 + return v2 +} +; run: %icmp32_uge_imm1(0) == 0 +; run: %icmp32_uge_imm1(1) == 1 + +function %icmp32_uge_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm uge v0, -1 + return v2 +} +; run: %icmp32_uge_imm2(0) == 0 +; run: %icmp32_uge_imm2(1) == 0 +; run: %icmp32_uge_imm2(-2) == 0 +; run: %icmp32_uge_imm2(-1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-ugt.clif b/cranelift/filetests/filetests/runtests/icmp-ugt.clif index 3d2d33056a4b..c1545a2ad770 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ugt.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ugt.clif @@ -53,3 +53,30 @@ block0(v0: i64, v1: i64): ; run: %icmp_ugt_i64(0, 1) == 0 ; run: %icmp_ugt_i64(-5, -1) == 0 ; run: %icmp_ugt_i64(1, -1) == 0 + +function %icmp32_ugt_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ugt v0, 0 + return v2 +} +; run: %icmp32_ugt_imm0(0) == 0 +; run: %icmp32_ugt_imm0(1) == 1 + +function %icmp32_ugt_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ugt v0, 1 + return v2 +} +; run: %icmp32_ugt_imm1(0) == 0 +; run: %icmp32_ugt_imm1(1) == 0 +; run: %icmp32_ugt_imm1(2) == 1 + +function %icmp32_ugt_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ugt v0, -1 + return v2 +} +; run: %icmp32_ugt_imm2(0) == 0 +; run: %icmp32_ugt_imm2(1) == 0 +; run: %icmp32_ugt_imm2(-2) == 0 +; run: %icmp32_ugt_imm2(-1) == 0 diff --git a/cranelift/filetests/filetests/runtests/icmp-ule.clif b/cranelift/filetests/filetests/runtests/icmp-ule.clif index f74e09bfb027..aad70591d4d0 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ule.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ule.clif @@ -53,3 +53,30 @@ block0(v0: i64, v1: i64): ; run: %icmp_ule_i64(0, 1) == 1 ; run: %icmp_ule_i64(-5, -1) == 1 ; run: %icmp_ule_i64(1, -1) == 1 + +function %icmp32_ule_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ule v0, 0 + 
return v2 +} +; run: %icmp32_ule_imm0(0) == 1 +; run: %icmp32_ule_imm0(1) == 0 + +function %icmp32_ule_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ule v0, 1 + return v2 +} +; run: %icmp32_ule_imm1(0) == 1 +; run: %icmp32_ule_imm1(1) == 1 +; run: %icmp32_ule_imm1(2) == 0 + +function %icmp32_ule_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ule v0, -1 + return v2 +} +; run: %icmp32_ule_imm2(0) == 1 +; run: %icmp32_ule_imm2(1) == 1 +; run: %icmp32_ule_imm2(-2) == 1 +; run: %icmp32_ule_imm2(-1) == 1 diff --git a/cranelift/filetests/filetests/runtests/icmp-ult.clif b/cranelift/filetests/filetests/runtests/icmp-ult.clif index 3126ef0a1644..ccb1a78954dc 100644 --- a/cranelift/filetests/filetests/runtests/icmp-ult.clif +++ b/cranelift/filetests/filetests/runtests/icmp-ult.clif @@ -53,3 +53,30 @@ block0(v0: i64, v1: i64): ; run: %icmp_ult_i64(0, 1) == 1 ; run: %icmp_ult_i64(-5, -1) == 1 ; run: %icmp_ult_i64(1, -1) == 1 + +function %icmp32_ult_imm0(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ult v0, 0 + return v2 +} +; run: %icmp32_ult_imm0(0) == 0 +; run: %icmp32_ult_imm0(1) == 0 + +function %icmp32_ult_imm1(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ult v0, 1 + return v2 +} +; run: %icmp32_ult_imm1(0) == 1 +; run: %icmp32_ult_imm1(1) == 0 +; run: %icmp32_ult_imm1(2) == 0 + +function %icmp32_ult_imm2(i32) -> i8 { +block0(v0: i32): + v2 = icmp_imm ult v0, -1 + return v2 +} +; run: %icmp32_ult_imm2(0) == 1 +; run: %icmp32_ult_imm2(1) == 1 +; run: %icmp32_ult_imm2(-2) == 1 +; run: %icmp32_ult_imm2(-1) == 0