diff --git a/winch/codegen/src/codegen/context.rs b/winch/codegen/src/codegen/context.rs index 788f09fd4403..ce0acaa3b5c2 100644 --- a/winch/codegen/src/codegen/context.rs +++ b/winch/codegen/src/codegen/context.rs @@ -119,8 +119,8 @@ impl<'a> CodeGenContext<'a> { ) { match src { Val::Reg(src) => masm.mov(RegImm::reg(*src), RegImm::reg(dst), size), - Val::I32(imm) => masm.mov(RegImm::imm((*imm).into()), RegImm::reg(dst), size), - Val::I64(imm) => masm.mov(RegImm::imm(*imm), RegImm::reg(dst), size), + Val::I32(imm) => masm.mov(RegImm::i32((*imm).into()), RegImm::reg(dst), size), + Val::I64(imm) => masm.mov(RegImm::i64(*imm), RegImm::reg(dst), size), Val::Local(index) => { let slot = self .frame @@ -161,12 +161,7 @@ impl<'a> CodeGenContext<'a> { .pop_i32_const() .expect("i32 const value at stack top"); let reg = self.pop_to_reg(masm, None, OperandSize::S32); - emit( - masm, - RegImm::reg(reg), - RegImm::imm(val as i64), - OperandSize::S32, - ); + emit(masm, RegImm::reg(reg), RegImm::i32(val), OperandSize::S32); self.stack.push(Val::reg(reg)); } else { let src = self.pop_to_reg(masm, None, OperandSize::S32); @@ -190,7 +185,7 @@ impl<'a> CodeGenContext<'a> { .pop_i64_const() .expect("i64 const value at stack top"); let reg = self.pop_to_reg(masm, None, OperandSize::S64); - emit(masm, RegImm::reg(reg), RegImm::imm(val), OperandSize::S64); + emit(masm, RegImm::reg(reg), RegImm::i64(val), OperandSize::S64); self.stack.push(Val::reg(reg)); } else { let src = self.pop_to_reg(masm, None, OperandSize::S64); diff --git a/winch/codegen/src/isa/aarch64/asm.rs b/winch/codegen/src/isa/aarch64/asm.rs index c3655e3e6f03..57404f3d2902 100644 --- a/winch/codegen/src/isa/aarch64/asm.rs +++ b/winch/codegen/src/isa/aarch64/asm.rs @@ -13,20 +13,6 @@ use cranelift_codegen::{ Writable, }; -/// An Aarch64 instruction operand. -#[derive(Debug)] -pub(crate) enum Operand { - /// Register. - Reg(Reg), - /// Memory address. - Mem(Address), - /// 64-bit signed immediate. 
- Imm(i64), -} - -// Conversions between winch-codegen aarch64 types and cranelift-codegen -// aarch64 types. - impl From for inst::OperandSize { fn from(size: OperandSize) -> Self { match size { @@ -149,25 +135,6 @@ impl Assembler { }); } - /// Move instruction combinations. - pub fn mov(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(rd)) => { - let scratch = regs::scratch(); - self.load_constant(*imm as u64, scratch); - self.mov_rr(scratch, *rd, size); - } - (Operand::Reg(src), Operand::Reg(rd)) => { - self.mov_rr(*src, *rd, size); - } - - (src, dst) => panic!( - "Invalid combination for mov: src = {:?}, dst = {:?}", - src, dst - ), - } - } - /// Register to register move. pub fn mov_rr(&mut self, rm: Reg, rd: Reg, size: OperandSize) { let writable_rd = Writable::from_reg(rd.into()); @@ -178,20 +145,9 @@ impl Assembler { }); } - /// Add instruction combinations. - pub fn add(&mut self, opm: Operand, opn: Operand, opd: Operand, size: OperandSize) { - match &(opm, opn, opd) { - (Operand::Imm(imm), Operand::Reg(rn), Operand::Reg(rd)) => { - self.add_ir(*imm as u64, *rn, *rd, size); - } - (Operand::Reg(rm), Operand::Reg(rn), Operand::Reg(rd)) => { - self.emit_alu_rrr_extend(ALUOp::Add, *rm, *rn, *rd, size); - } - (rm, rn, rd) => panic!( - "Invalid combination for add: rm = {:?}, rn = {:?}, rd = {:?}", - rm, rn, rd - ), - } + /// Add with three registers. + pub fn add_rrr(&mut self, rm: Reg, rn: Reg, rd: Reg, size: OperandSize) { + self.emit_alu_rrr_extend(ALUOp::Add, rm, rn, rd, size); } /// Add immediate and register. @@ -206,20 +162,9 @@ impl Assembler { } } - /// Sub instruction combinations. 
- pub fn sub(&mut self, opm: Operand, opn: Operand, opd: Operand, size: OperandSize) {
- match &(opm, opn, opd) {
- (Operand::Imm(imm), Operand::Reg(rn), Operand::Reg(rd)) => {
- self.sub_ir(*imm as u64, *rn, *rd, size);
- }
- (Operand::Reg(rm), Operand::Reg(rn), Operand::Reg(rd)) => {
- self.emit_alu_rrr_extend(ALUOp::Sub, *rm, *rn, *rd, size);
- }
- (rm, rn, rd) => panic!(
- "Invalid combination for sub: rm = {:?}, rn = {:?}, rd = {:?}",
- rm, rn, rd
- ),
- }
+ /// Subtract with three registers.
+ pub fn sub_rrr(&mut self, rm: Reg, rn: Reg, rd: Reg, size: OperandSize) {
+ self.emit_alu_rrr_extend(ALUOp::Sub, rm, rn, rd, size);
}

/// Subtract immediate and register.
@@ -234,23 +179,12 @@ impl Assembler {
}
}

- /// Mul instruction combinations.
- pub fn mul(&mut self, opm: Operand, opn: Operand, opd: Operand, size: OperandSize) {
- match &(opm, opn, opd) {
- (Operand::Imm(imm), Operand::Reg(rn), Operand::Reg(rd)) => {
- self.mul_ir(*imm as u64, *rn, *rd, size);
- }
- (Operand::Reg(rm), Operand::Reg(rn), Operand::Reg(rd)) => {
- self.emit_alu_rrrr(ALUOp3::MAdd, *rm, *rn, *rd, regs::zero(), size);
- }
- (rm, rn, rd) => panic!(
- "Invalid combination for sub: rm = {:?}, rn = {:?}, rd = {:?}",
- rm, rn, rd
- ),
- }
+ /// Multiply with three registers.
+ pub fn mul_rrr(&mut self, rm: Reg, rn: Reg, rd: Reg, size: OperandSize) {
+ self.emit_alu_rrrr(ALUOp3::MAdd, rm, rn, rd, regs::zero(), size);
}

- /// Mul immediate and register.
+ /// Multiply immediate and register. 
pub fn mul_ir(&mut self, imm: u64, rn: Reg, rd: Reg, size: OperandSize) { let scratch = regs::scratch(); self.load_constant(imm, scratch); diff --git a/winch/codegen/src/isa/aarch64/masm.rs b/winch/codegen/src/isa/aarch64/masm.rs index 45b0d40c49f4..bd47fa998aee 100644 --- a/winch/codegen/src/isa/aarch64/masm.rs +++ b/winch/codegen/src/isa/aarch64/masm.rs @@ -1,16 +1,11 @@ -use super::{ - abi::Aarch64ABI, - address::Address, - asm::{Assembler, Operand}, - regs, -}; +use super::{abi::Aarch64ABI, address::Address, asm::Assembler, regs}; use crate::{ abi::{self, local::LocalSlot}, codegen::CodeGenContext, isa::reg::Reg, masm::{ - CalleeKind, CmpKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind, - ShiftKind, + CalleeKind, CmpKind, DivKind, Imm as I, MacroAssembler as Masm, OperandSize, RegImm, + RemKind, ShiftKind, }, }; use cranelift_codegen::{settings, Final, MachBufferFinalized, MachLabel}; @@ -23,29 +18,6 @@ pub(crate) struct MacroAssembler { sp_offset: u32, } -// Conversions between generic masm arguments and aarch64 operands. - -impl From for Operand { - fn from(rimm: RegImm) -> Self { - match rimm { - RegImm::Reg(r) => r.into(), - RegImm::Imm(imm) => Operand::Imm(imm), - } - } -} - -impl From for Operand { - fn from(reg: Reg) -> Self { - Operand::Reg(reg) - } -} - -impl From
for Operand { - fn from(addr: Address) -> Self { - Operand::Mem(addr) - } -} - impl MacroAssembler { /// Create an Aarch64 MacroAssembler. pub fn new(shared_flags: settings::Flags) -> Self { @@ -135,9 +107,14 @@ impl Masm for MacroAssembler { fn store(&mut self, src: RegImm, dst: Address, size: OperandSize) { let src = match src { - RegImm::Imm(imm) => { + RegImm::Imm(v) => { + let imm = match v { + I::I32(v) => v as u64, + I::I64(v) => v, + _ => unreachable!(), + }; let scratch = regs::scratch(); - self.asm.load_constant(imm as u64, scratch); + self.asm.load_constant(imm, scratch); scratch } RegImm::Reg(reg) => reg, @@ -171,19 +148,80 @@ impl Masm for MacroAssembler { } fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize) { - self.asm.mov(src.into(), dst.into(), size); + match (src, dst) { + (RegImm::Imm(v), RegImm::Reg(rd)) => { + let imm = match v { + I::I32(v) => v as u64, + I::I64(v) => v, + _ => panic!(), + }; + + let scratch = regs::scratch(); + self.asm.load_constant(imm as u64, scratch); + self.asm.mov_rr(scratch, rd, size); + } + (RegImm::Reg(rs), RegImm::Reg(rd)) => { + self.asm.mov_rr(rs, rd, size); + } + _ => Self::handle_invalid_two_form_operand_combination(src, dst), + } } fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - self.asm.add(rhs.into(), lhs.into(), dst.into(), size); + match (rhs, lhs, dst) { + (RegImm::Imm(v), RegImm::Reg(rn), RegImm::Reg(rd)) => { + let imm = match v { + I::I32(v) => v as u64, + I::I64(v) => v, + _ => unreachable!(), + }; + + self.asm.add_ir(imm, rn, rd, size); + } + + (RegImm::Reg(rm), RegImm::Reg(rn), RegImm::Reg(rd)) => { + self.asm.add_rrr(rm, rn, rd, size); + } + _ => Self::handle_invalid_three_form_operand_combination(dst, lhs, rhs), + } } fn sub(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - self.asm.sub(rhs.into(), lhs.into(), dst.into(), size); + match (rhs, lhs, dst) { + (RegImm::Imm(v), RegImm::Reg(rn), RegImm::Reg(rd)) => { + let imm = match v { 
+ I::I32(v) => v as u64, + I::I64(v) => v, + _ => unreachable!(), + }; + + self.asm.sub_ir(imm, rn, rd, size); + } + + (RegImm::Reg(rm), RegImm::Reg(rn), RegImm::Reg(rd)) => { + self.asm.sub_rrr(rm, rn, rd, size); + } + _ => Self::handle_invalid_three_form_operand_combination(dst, lhs, rhs), + } } fn mul(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - self.asm.mul(rhs.into(), lhs.into(), dst.into(), size); + match (rhs, lhs, dst) { + (RegImm::Imm(v), RegImm::Reg(rn), RegImm::Reg(rd)) => { + let imm = match v { + I::I32(v) => v as u64, + I::I64(v) => v, + _ => unreachable!(), + }; + + self.asm.mul_ir(imm, rn, rd, size); + } + + (RegImm::Reg(rm), RegImm::Reg(rn), RegImm::Reg(rd)) => { + self.asm.mul_rrr(rm, rn, rd, size); + } + _ => Self::handle_invalid_three_form_operand_combination(dst, lhs, rhs), + } } fn and(&mut self, _dst: RegImm, _lhs: RegImm, _rhs: RegImm, _size: OperandSize) { @@ -231,7 +269,11 @@ impl Masm for MacroAssembler { Address::offset(reg, offset as i64) } - fn cmp_with_set(&mut self, _src: RegImm, _dst: RegImm, _kind: CmpKind, _size: OperandSize) { + fn cmp_with_set(&mut self, _src: RegImm, _dst: Reg, _kind: CmpKind, _size: OperandSize) { + todo!() + } + + fn cmp(&mut self, _src: RegImm, _dest: Reg, _size: OperandSize) { todo!() } @@ -290,4 +332,15 @@ impl MacroAssembler { let shadow_sp = regs::shadow_sp(); self.asm.mov_rr(sp, shadow_sp, OperandSize::S64); } + + fn handle_invalid_two_form_operand_combination(src: RegImm, dst: RegImm) { + panic!("Invalid operand combination; src={:?}, dst={:?}", src, dst); + } + + fn handle_invalid_three_form_operand_combination(dst: RegImm, lhs: RegImm, rhs: RegImm) { + panic!( + "Invalid operand combination; dst={:?}, lhs={:?}, rhs={:?}", + dst, lhs, rhs + ); + } } diff --git a/winch/codegen/src/isa/x64/asm.rs b/winch/codegen/src/isa/x64/asm.rs index 0d0ab67d800d..ac213bcd48a4 100644 --- a/winch/codegen/src/isa/x64/asm.rs +++ b/winch/codegen/src/isa/x64/asm.rs @@ -6,8 +6,7 @@ use crate::{ 
}; use cranelift_codegen::{ entity::EntityRef, - ir::TrapCode, - ir::{ExternalName, Opcode, UserExternalNameRef}, + ir::{ExternalName, Opcode, TrapCode, UserExternalNameRef}, isa::{ x64::{ args::{ @@ -23,20 +22,9 @@ use cranelift_codegen::{ Writable, }; -use super::{address::Address, regs}; +use super::address::Address; use smallvec::smallvec; -/// A x64 instruction operand. -#[derive(Debug, Copy, Clone)] -pub(crate) enum Operand { - /// Register. - Reg(Reg), - /// Memory address. - Mem(Address), - /// Signed 64-bit immediate. - Imm(i64), -} - // Conversions between winch-codegen x64 types and cranelift-codegen x64 types. impl From for RegMemImm { @@ -160,10 +148,9 @@ impl Assembler { /// Return the emitted code. pub fn finalize(mut self) -> MachBufferFinalized { - let constants = Default::default(); let stencil = self .buffer - .finish(&constants, self.emit_state.ctrl_plane_mut()); + .finish(&Default::default(), self.emit_state.ctrl_plane_mut()); stencil.apply_base_srcloc(Default::default()) } @@ -171,6 +158,14 @@ impl Assembler { inst.emit(&[], &mut self.buffer, &self.emit_info, &mut self.emit_state); } + fn to_synthetic_amode(addr: &Address) -> SyntheticAmode { + match addr { + Address::Offset { base, offset } => { + SyntheticAmode::real(Amode::imm_reg(*offset as i32, (*base).into())) + } + } + } + /// Push register. pub fn push_r(&mut self, reg: Reg) { self.emit(Inst::Push64 { src: reg.into() }); @@ -191,29 +186,6 @@ impl Assembler { }); } - /// Move instruction variants. 
- pub fn mov(&mut self, src: Operand, dst: Operand, size: OperandSize) { - use self::Operand::*; - - match &(src, dst) { - (Reg(lhs), Reg(rhs)) => self.mov_rr(*lhs, *rhs, size), - (Reg(lhs), Mem(addr)) => match addr { - Address::Offset { base, offset: imm } => self.mov_rm(*lhs, *base, *imm, size), - }, - (Imm(imm), Mem(addr)) => match addr { - Address::Offset { base, offset: disp } => { - self.mov_im(*imm as u64, *base, *disp, size) - } - }, - (Imm(imm), Reg(reg)) => self.mov_ir(*imm as u64, *reg, size), - (Mem(addr), Reg(reg)) => match addr { - Address::Offset { base, offset: imm } => self.mov_mr(*base, *imm, *reg, size), - }, - - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - /// Register-to-register move. pub fn mov_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { self.emit(Inst::MovRR { @@ -224,23 +196,22 @@ impl Assembler { } /// Register-to-memory move. - pub fn mov_rm(&mut self, src: Reg, base: Reg, disp: u32, size: OperandSize) { - let dst = Amode::imm_reg(disp as i32, base.into()); - + pub fn mov_rm(&mut self, src: Reg, addr: &Address, size: OperandSize) { + let dst = Self::to_synthetic_amode(addr); self.emit(Inst::MovRM { size: size.into(), src: src.into(), - dst: SyntheticAmode::real(dst), + dst, }); } /// Immediate-to-memory move. - pub fn mov_im(&mut self, src: u64, base: Reg, disp: u32, size: OperandSize) { - let dst = Amode::imm_reg(disp as i32, base.into()); + pub fn mov_im(&mut self, src: u64, addr: &Address, size: OperandSize) { + let dst = Self::to_synthetic_amode(addr); self.emit(Inst::MovImmM { size: size.into(), simm64: src, - dst: SyntheticAmode::real(dst), + dst, }); } @@ -257,11 +228,10 @@ impl Assembler { } /// Memory-to-register load. 
- pub fn mov_mr(&mut self, base: Reg, disp: u32, dst: Reg, size: OperandSize) { + pub fn mov_mr(&mut self, addr: &Address, dst: Reg, size: OperandSize) { use OperandSize::S64; - let amode = Amode::imm_reg(disp as i32, base.into()); - let src = SyntheticAmode::real(amode); + let src = Self::to_synthetic_amode(addr); if size == S64 { self.emit(Inst::Mov64MR { @@ -278,23 +248,6 @@ impl Assembler { } } - /// Subtract instruction variants. - pub fn sub(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.sub_ir(val, *dst, size) - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.sub_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.sub_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - /// Subtract register and register pub fn sub_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { self.emit(Inst::AluRmiR { @@ -319,40 +272,6 @@ impl Assembler { }); } - /// Signed multiplication instruction. - pub fn mul(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.mul_ir(val, *dst, size); - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.mul_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.mul_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - - /// Logical and instruction variants. 
- pub fn and(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.and_ir(val, *dst, size); - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.and_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.and_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - /// "and" two registers. pub fn and_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { self.emit(Inst::AluRmiR { @@ -364,7 +283,7 @@ impl Assembler { }); } - fn and_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { + pub fn and_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { let imm = RegMemImm::imm(imm as u32); self.emit(Inst::AluRmiR { @@ -376,24 +295,7 @@ impl Assembler { }); } - /// Logical or instruction variants. - pub fn or(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.or_ir(val, *dst, size); - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.or_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.or_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - - fn or_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { + pub fn or_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { self.emit(Inst::AluRmiR { size: size.into(), op: AluRmiROpcode::Or, @@ -403,7 +305,7 @@ impl Assembler { }); } - fn or_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { + pub fn or_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { let imm = RegMemImm::imm(imm as u32); self.emit(Inst::AluRmiR { @@ -415,23 +317,6 @@ impl Assembler { }); } - /// Logical exclusive or instruction variants. 
- pub fn xor(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.xor_ir(val, *dst, size); - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.xor_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.xor_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - /// Logical exclusive or with registers. pub fn xor_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { self.emit(Inst::AluRmiR { @@ -443,7 +328,7 @@ impl Assembler { }); } - fn xor_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { + pub fn xor_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { let imm = RegMemImm::imm(imm as u32); self.emit(Inst::AluRmiR { @@ -615,23 +500,6 @@ impl Assembler { }); } - /// Add instruction variants. - pub fn add(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.add_ir(val, *dst, size) - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.add_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.add_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - /// Add immediate and register. pub fn add_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { let imm = RegMemImm::imm(imm as u32); @@ -656,24 +524,7 @@ impl Assembler { }); } - /// Compare two operands and set status register flags. 
- pub fn cmp(&mut self, src: Operand, dst: Operand, size: OperandSize) { - match &(src, dst) { - (Operand::Imm(imm), Operand::Reg(dst)) => { - if let Ok(val) = i32::try_from(*imm) { - self.cmp_ir(val, *dst, size) - } else { - let scratch = regs::scratch(); - self.load_constant(imm, scratch, size); - self.cmp_rr(scratch, *dst, size); - } - } - (Operand::Reg(src), Operand::Reg(dst)) => self.cmp_rr(*src, *dst, size), - _ => Self::handle_invalid_operand_combination(src, dst), - } - } - - fn cmp_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { + pub fn cmp_ir(&mut self, imm: i32, dst: Reg, size: OperandSize) { let imm = RegMemImm::imm(imm as u32); self.emit(Inst::CmpRmiR { @@ -684,7 +535,7 @@ impl Assembler { }); } - fn cmp_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { + pub fn cmp_rr(&mut self, src: Reg, dst: Reg, size: OperandSize) { self.emit(Inst::CmpRmiR { size: size.into(), opcode: CmpOpcode::Cmp, @@ -715,11 +566,7 @@ impl Assembler { /// Set value in dst to `0` or `1` based on flags in status register and /// [`CmpKind`]. - pub fn setcc(&mut self, kind: CmpKind, dst: Operand) { - let dst = match dst { - Operand::Reg(r) => r, - _ => panic!("Invalid operand for dst"), - }; + pub fn setcc(&mut self, kind: CmpKind, dst: Reg) { // Clear the dst register or bits 1 to 31 may be incorrectly set. // Don't use xor since it updates the status register. self.emit(Inst::Imm { @@ -826,15 +673,6 @@ impl Assembler { } } - /// Load an imm constant into a register - pub fn load_constant(&mut self, imm: &i64, dst: Reg, size: OperandSize) { - self.mov_ir(*imm as u64, dst, size); - } - - fn handle_invalid_operand_combination(src: Operand, dst: Operand) { - panic!("Invalid operand combination; src={:?}, dst={:?}", src, dst); - } - /// Emits a conditional jump to the given label. 
pub fn jmp_if(&mut self, cc: impl Into, taken: MachLabel) { self.emit(Inst::JmpIf { diff --git a/winch/codegen/src/isa/x64/masm.rs b/winch/codegen/src/isa/x64/masm.rs index 299c1dc74e66..3969d5423889 100644 --- a/winch/codegen/src/isa/x64/masm.rs +++ b/winch/codegen/src/isa/x64/masm.rs @@ -1,11 +1,13 @@ use super::{ abi::X64ABI, address::Address, - asm::{Assembler, Operand}, + asm::Assembler, regs::{self, rbp, rsp}, }; + +use crate::abi::ABI; use crate::masm::{ - CmpKind, DivKind, MacroAssembler as Masm, OperandSize, RegImm, RemKind, ShiftKind, + CmpKind, DivKind, Imm as I, MacroAssembler as Masm, OperandSize, RegImm, RemKind, ShiftKind, }; use crate::{ abi::{self, align_to, calculate_frame_adjustment, LocalSlot}, @@ -28,29 +30,6 @@ pub(crate) struct MacroAssembler { flags: x64_settings::Flags, } -// Conversions between generic masm arguments and x64 operands. - -impl From for Operand { - fn from(rimm: RegImm) -> Self { - match rimm { - RegImm::Reg(r) => r.into(), - RegImm::Imm(imm) => Operand::Imm(imm), - } - } -} - -impl From for Operand { - fn from(reg: Reg) -> Self { - Operand::Reg(reg) - } -} - -impl From
for Operand { - fn from(addr: Address) -> Self { - Operand::Mem(addr) - } -} - impl Masm for MacroAssembler { type Address = Address; type Ptr = u8; @@ -67,8 +46,8 @@ impl Masm for MacroAssembler { fn push(&mut self, reg: Reg) -> u32 { self.asm.push_r(reg); - self.increment_sp(::word_bytes()); - + let increment = ::word_bytes(); + self.increment_sp(increment); self.sp_offset } @@ -117,10 +96,14 @@ impl Masm for MacroAssembler { } fn store(&mut self, src: RegImm, dst: Address, size: OperandSize) { - let src: Operand = src.into(); - let dst: Operand = dst.into(); - - self.asm.mov(src, dst, size); + match src { + RegImm::Imm(imm) => match imm { + I::I32(v) => self.asm.mov_im(v as u64, &dst, size), + I::I64(v) => self.asm.mov_im(v, &dst, size), + _ => unreachable!(), + }, + RegImm::Reg(reg) => self.asm.mov_rm(reg, &dst, size), + } } fn pop(&mut self, dst: Reg) { @@ -145,9 +128,7 @@ impl Masm for MacroAssembler { } fn load(&mut self, src: Address, dst: Reg, size: OperandSize) { - let src = src.into(); - let dst = dst.into(); - self.asm.mov(src, dst, size); + self.asm.mov_mr(&src, dst, size); } fn sp_offset(&self) -> u32 { @@ -159,88 +140,135 @@ impl Masm for MacroAssembler { } fn mov(&mut self, src: RegImm, dst: RegImm, size: OperandSize) { - let src: Operand = src.into(); - let dst: Operand = dst.into(); - - self.asm.mov(src, dst, size); + match (src, dst) { + (RegImm::Reg(src), RegImm::Reg(dst)) => self.asm.mov_rr(src, dst, size), + (RegImm::Imm(imm), RegImm::Reg(dst)) => match imm { + I::I32(v) => self.asm.mov_ir(v as u64, dst, size), + I::I64(v) => self.asm.mov_ir(v, dst, size), + _ => unreachable!(), + }, + _ => Self::handle_invalid_operand_combination(src, dst), + } } fn add(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - let (src, dst): (Operand, Operand) = if dst == lhs { - (rhs.into(), dst.into()) - } else { - panic!( - "the destination and first source argument must be the same, dst={:?}, lhs={:?}", - dst, lhs - ); - }; + 
Self::ensure_two_argument_form(&dst, &lhs); + match (rhs, dst) { + (RegImm::Imm(imm), RegImm::Reg(reg)) => { + if let Some(v) = imm.to_i32() { + self.asm.add_ir(v, reg, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.add_rr(scratch, reg, size); + } + } - self.asm.add(src, dst, size); + (RegImm::Reg(src), RegImm::Reg(dst)) => { + self.asm.add_rr(src, dst, size); + } + _ => Self::handle_invalid_operand_combination(rhs, dst), + } } fn sub(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - let (src, dst): (Operand, Operand) = if dst == lhs { - (rhs.into(), dst.into()) - } else { - panic!( - "the destination and first source argument must be the same, dst={:?}, lhs={:?}", - dst, lhs - ); - }; + Self::ensure_two_argument_form(&dst, &lhs); + match (rhs, dst) { + (RegImm::Imm(imm), RegImm::Reg(reg)) => { + if let Some(v) = imm.to_i32() { + self.asm.sub_ir(v, reg, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.sub_rr(scratch, reg, size); + } + } - self.asm.sub(src, dst, size); + (RegImm::Reg(src), RegImm::Reg(dst)) => { + self.asm.sub_rr(src, dst, size); + } + _ => Self::handle_invalid_operand_combination(rhs, dst), + } } fn mul(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - let (src, dst): (Operand, Operand) = if dst == lhs { - (rhs.into(), dst.into()) - } else { - panic!( - "the destination and first source argument must be the same, dst={:?}, lhs={:?}", - dst, lhs - ); - }; + Self::ensure_two_argument_form(&dst, &lhs); + match (rhs, dst) { + (RegImm::Imm(imm), RegImm::Reg(reg)) => { + if let Some(v) = imm.to_i32() { + self.asm.mul_ir(v, reg, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.mul_rr(scratch, reg, size); + } + } - self.asm.mul(src, dst, size); + (RegImm::Reg(src), RegImm::Reg(dst)) => { + self.asm.mul_rr(src, dst, size); + } + _ => 
Self::handle_invalid_operand_combination(rhs, dst), + } } fn and(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - let (src, dst): (Operand, Operand) = if dst == lhs { - (rhs.into(), dst.into()) - } else { - panic!( - "the destination and first source argument must be the same, dst={:?}, lhs={:?}", - dst, lhs - ); - }; + Self::ensure_two_argument_form(&dst, &lhs); + match (rhs, dst) { + (RegImm::Imm(imm), RegImm::Reg(reg)) => { + if let Some(v) = imm.to_i32() { + self.asm.and_ir(v, reg, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.and_rr(scratch, reg, size); + } + } - self.asm.and(src, dst, size); + (RegImm::Reg(src), RegImm::Reg(dst)) => { + self.asm.and_rr(src, dst, size); + } + _ => Self::handle_invalid_operand_combination(rhs, dst), + } } fn or(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - let (src, dst): (Operand, Operand) = if dst == lhs { - (rhs.into(), dst.into()) - } else { - panic!( - "the destination and first source argument must be the same, dst={:?}, lhs={:?}", - dst, lhs - ); - }; + Self::ensure_two_argument_form(&dst, &lhs); + match (rhs, dst) { + (RegImm::Imm(imm), RegImm::Reg(reg)) => { + if let Some(v) = imm.to_i32() { + self.asm.or_ir(v, reg, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.or_rr(scratch, reg, size); + } + } - self.asm.or(src, dst, size); + (RegImm::Reg(src), RegImm::Reg(dst)) => { + self.asm.or_rr(src, dst, size); + } + _ => Self::handle_invalid_operand_combination(rhs, dst), + } } fn xor(&mut self, dst: RegImm, lhs: RegImm, rhs: RegImm, size: OperandSize) { - let (src, dst): (Operand, Operand) = if dst == lhs { - (rhs.into(), dst.into()) - } else { - panic!( - "the destination and first source argument must be the same, dst={:?}, lhs={:?}", - dst, lhs - ); - }; + Self::ensure_two_argument_form(&dst, &lhs); + match (rhs, dst) { + (RegImm::Imm(imm), 
RegImm::Reg(reg)) => { + if let Some(v) = imm.to_i32() { + self.asm.xor_ir(v, reg, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.xor_rr(scratch, reg, size); + } + } - self.asm.xor(src, dst, size); + (RegImm::Reg(src), RegImm::Reg(dst)) => { + self.asm.xor_rr(src, dst, size); + } + _ => Self::handle_invalid_operand_combination(rhs, dst), + } } fn shift(&mut self, context: &mut CodeGenContext, kind: ShiftKind, size: OperandSize) { @@ -341,9 +369,25 @@ impl Masm for MacroAssembler { Address::offset(reg, offset) } - fn cmp_with_set(&mut self, src: RegImm, dst: RegImm, kind: CmpKind, size: OperandSize) { - let dst = dst.into(); - self.asm.cmp(src.into(), dst, size); + fn cmp(&mut self, src: RegImm, dst: Reg, size: OperandSize) { + match src { + RegImm::Imm(imm) => { + if let Some(v) = imm.to_i32() { + self.asm.cmp_ir(v, dst, size); + } else { + let scratch = regs::scratch(); + self.load_constant(&imm, scratch, size); + self.asm.cmp_rr(scratch, dst, size); + } + } + RegImm::Reg(src) => { + self.asm.cmp_rr(src, dst, size); + } + } + } + + fn cmp_with_set(&mut self, src: RegImm, dst: Reg, kind: CmpKind, size: OperandSize) { + self.cmp(src, dst, size); self.asm.setcc(kind, dst); } @@ -412,10 +456,10 @@ impl Masm for MacroAssembler { if (kind == Eq || kind == Ne) && (rlhs == rrhs) { self.asm.test_rr(*rrhs, *rlhs, size); } else { - self.asm.cmp(lhs.into(), rhs.into(), size); + self.cmp(lhs, rhs.get_reg().unwrap(), size); } } - _ => self.asm.cmp(lhs.into(), rhs.into(), size), + _ => self.cmp(lhs, rhs.get_reg().unwrap(), size), } self.asm.jmp_if(kind, taken); } @@ -456,7 +500,8 @@ impl Masm for MacroAssembler { // x -= (x >> 1) & m1; self.asm.shift_ir(1u8, dst, ShiftKind::ShrU, size); - self.asm.and(RegImm::imm(masks[0]).into(), dst.into(), size); + let lhs = dst.into(); + self.and(lhs, lhs, RegImm::i64(masks[0]), size); self.asm.sub_rr(dst, tmp, size); // x = (x & m2) + ((x >> 2) & m2); @@ -464,20 +509,22 @@ impl Masm 
for MacroAssembler { // Load `0x3333...` into the scratch reg once, allowing us to use // `and_rr` and avoid inadvertently loading it twice as with `and` let scratch = regs::scratch(); - self.asm.load_constant(&masks[1], scratch, size); + self.load_constant(&I::i64(masks[1]), scratch, size); self.asm.and_rr(scratch, dst.into(), size); self.asm.shift_ir(2u8, tmp, ShiftKind::ShrU, size); self.asm.and_rr(scratch, tmp, size); self.asm.add_rr(dst, tmp, size); // x = (x + (x >> 4)) & m4; - self.asm.mov(tmp.into(), dst.into(), size); + self.asm.mov_rr(tmp.into(), dst.into(), size); self.asm.shift_ir(4u8, dst, ShiftKind::ShrU, size); self.asm.add_rr(tmp, dst, size); - self.asm.and(RegImm::imm(masks[2]).into(), dst.into(), size); + let lhs = dst.into(); + self.and(lhs, lhs, RegImm::i64(masks[2]), size); // (x * h01) >> shift_amt - self.asm.mul(RegImm::imm(masks[3]).into(), dst.into(), size); + let lhs = dst.into(); + self.mul(lhs, lhs, RegImm::i64(masks[3]), size); self.asm.shift_ir(shift_amt, dst, ShiftKind::ShrU, size); context.stack.push(Val::reg(dst)); @@ -513,4 +560,25 @@ impl MacroAssembler { ); self.sp_offset -= bytes; } + + fn load_constant(&mut self, constant: &I, dst: Reg, size: OperandSize) { + match constant { + I::I32(v) => self.asm.mov_ir(*v as u64, dst, size), + I::I64(v) => self.asm.mov_ir(*v, dst, size), + _ => panic!(), + } + } + + fn handle_invalid_operand_combination(src: RegImm, dst: RegImm) -> T { + panic!("Invalid operand combination; src={:?}, dst={:?}", src, dst); + } + + fn ensure_two_argument_form(dst: &RegImm, lhs: &RegImm) { + assert!( + dst == lhs, + "the destination and first source argument must be the same, dst={:?}, lhs={:?}", + dst, + lhs + ); + } } diff --git a/winch/codegen/src/masm.rs b/winch/codegen/src/masm.rs index 7bf009f57940..7561ae4413b3 100644 --- a/winch/codegen/src/masm.rs +++ b/winch/codegen/src/masm.rs @@ -97,8 +97,56 @@ impl OperandSize { pub(crate) enum RegImm { /// A register. Reg(Reg), - /// 64-bit signed immediate. 
- Imm(i64),
+ /// A tagged immediate argument.
+ Imm(Imm),
+}
+
+/// A tagged representation of an immediate.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub(crate) enum Imm {
+ /// I32 immediate.
+ I32(u32),
+ /// I64 immediate.
+ I64(u64),
+ /// F32 immediate.
+ F32(u32),
+ /// F64 immediate.
+ F64(u64),
+}
+
+impl Imm {
+ /// Create a new I64 immediate.
+ pub fn i64(val: i64) -> Self {
+ Self::I64(val as u64)
+ }
+
+ /// Create a new I32 immediate.
+ pub fn i32(val: i32) -> Self {
+ Self::I32(val as u32)
+ }
+
+ /// Create a new F32 immediate.
+ // Temporary until support for f32.const is added.
+ #[allow(dead_code)]
+ pub fn f32(bits: u32) -> Self {
+ Self::F32(bits)
+ }
+
+ /// Create a new F64 immediate.
+ // Temporary until support for f64.const is added.
+ #[allow(dead_code)]
+ pub fn f64(bits: u64) -> Self {
+ Self::F64(bits)
+ }
+
+ /// Convert the immediate to i32, if possible.
+ pub fn to_i32(&self) -> Option {
+ match self {
+ Self::I32(v) => Some(*v as i32),
+ Self::I64(v) => i32::try_from(*v as i64).ok(),
+ _ => None,
+ }
+ }
}

#[derive(Clone)]
@@ -115,9 +163,37 @@ impl RegImm {
RegImm::Reg(r)
}

- /// Immediate constructor.
- pub fn imm(imm: i64) -> Self {
- RegImm::Imm(imm)
+ /// I64 immediate constructor.
+ pub fn i64(val: i64) -> Self {
+ RegImm::Imm(Imm::i64(val))
+ }
+
+ /// I32 immediate constructor.
+ pub fn i32(val: i32) -> Self {
+ RegImm::Imm(Imm::i32(val))
+ }
+
+ /// F32 immediate, stored using its bits representation.
+ // Temporary until support for f32.const is added.
+ #[allow(dead_code)]
+ pub fn f32(bits: u32) -> Self {
+ RegImm::Imm(Imm::f32(bits))
+ }
+
+ /// F64 immediate, stored using its bits representation.
+ // Temporary until support for f64.const is added.
+ #[allow(dead_code)]
+ pub fn f64(bits: u64) -> Self {
+ RegImm::Imm(Imm::f64(bits))
+ }
+
+ /// Get the underlying register of the operand,
+ /// if it is one. 
+ pub fn get_reg(&self) -> Option { + match self { + Self::Reg(r) => Some(*r), + _ => None, + } } } @@ -249,9 +325,12 @@ pub(crate) trait MacroAssembler { /// Calculate remainder. fn rem(&mut self, context: &mut CodeGenContext, kind: RemKind, size: OperandSize); + /// Compare src and dst and put the result in dst. + fn cmp(&mut self, src: RegImm, dest: Reg, size: OperandSize); + /// Compare src and dst and put the result in dst. /// This function will potentially emit a series of instructions. - fn cmp_with_set(&mut self, src: RegImm, dst: RegImm, kind: CmpKind, size: OperandSize); + fn cmp_with_set(&mut self, src: RegImm, dst: Reg, kind: CmpKind, size: OperandSize); /// Count the number of leading zeroes in src and put the result in dst. /// In x64, this will emit multiple instructions if the `has_lzcnt` flag is @@ -294,7 +373,7 @@ pub(crate) trait MacroAssembler { assert!(mem.start % 4 == 0); let start = align_to(mem.start, word_size); let addr: Self::Address = self.local_address(&LocalSlot::i32(start)); - self.store(RegImm::imm(0), addr, OperandSize::S32); + self.store(RegImm::i32(0), addr, OperandSize::S32); // Ensure that the new start of the range, is word-size aligned. 
assert!(start % word_size == 0); start @@ -306,7 +385,7 @@ pub(crate) trait MacroAssembler { if slots == 1 { let slot = LocalSlot::i64(start + word_size); let addr: Self::Address = self.local_address(&slot); - self.store(RegImm::imm(0), addr, OperandSize::S64); + self.store(RegImm::i64(0), addr, OperandSize::S64); } else { // TODO // Add an upper bound to this generation; diff --git a/winch/codegen/src/visitor.rs b/winch/codegen/src/visitor.rs index 27bf2028f19b..7e87e7f82bcf 100644 --- a/winch/codegen/src/visitor.rs +++ b/winch/codegen/src/visitor.rs @@ -303,7 +303,7 @@ where use OperandSize::*; self.context.unop(self.masm, S32, &mut |masm, reg, size| { - masm.cmp_with_set(RegImm::imm(0), reg.into(), CmpKind::Eq, size); + masm.cmp_with_set(RegImm::i64(0), reg.into(), CmpKind::Eq, size); }); } @@ -311,7 +311,7 @@ where use OperandSize::*; self.context.unop(self.masm, S64, &mut |masm, reg, size| { - masm.cmp_with_set(RegImm::imm(0), reg.into(), CmpKind::Eq, size); + masm.cmp_with_set(RegImm::i64(0), reg.into(), CmpKind::Eq, size); }); } @@ -633,14 +633,14 @@ where { fn cmp_i32s(&mut self, kind: CmpKind) { self.context.i32_binop(self.masm, |masm, dst, src, size| { - masm.cmp_with_set(src, dst, kind, size); + masm.cmp_with_set(src, dst.get_reg().unwrap(), kind, size); }); } fn cmp_i64s(&mut self, kind: CmpKind) { self.context .i64_binop(self.masm, move |masm, dst, src, size| { - masm.cmp_with_set(src, dst, kind, size); + masm.cmp_with_set(src, dst.get_reg().unwrap(), kind, size); }); } } diff --git a/winch/filetests/filetests/aarch64/i32_add/max_one.wat b/winch/filetests/filetests/aarch64/i32_add/max_one.wat index 52a46b16b489..2ac6afbcab92 100644 --- a/winch/filetests/filetests/aarch64/i32_add/max_one.wat +++ b/winch/filetests/filetests/aarch64/i32_add/max_one.wat @@ -13,9 +13,9 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: f08361b2 orr x16, xzr, #0xffffffff80000000 +;; 18: 1000b0d2 mov x16, #0x80000000 ;; 1c: 
e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 0060300b add w0, w0, w16, uxtx ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git a/winch/filetests/filetests/aarch64/i32_add/mixed.wat b/winch/filetests/filetests/aarch64/i32_add/mixed.wat index d90d6a122c0f..e9819e450df9 100644 --- a/winch/filetests/filetests/aarch64/i32_add/mixed.wat +++ b/winch/filetests/filetests/aarch64/i32_add/mixed.wat @@ -13,7 +13,7 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: 10008092 mov x16, #-1 +;; 18: f07f40b2 orr x16, xzr, #0xffffffff ;; 1c: e003102a mov w0, w16 ;; 20: 00040011 add w0, w0, #1 ;; 24: ff230091 add sp, sp, #8 diff --git a/winch/filetests/filetests/aarch64/i32_add/signed.wat b/winch/filetests/filetests/aarch64/i32_add/signed.wat index 8b19dd61b87b..3f92531adca8 100644 --- a/winch/filetests/filetests/aarch64/i32_add/signed.wat +++ b/winch/filetests/filetests/aarch64/i32_add/signed.wat @@ -13,9 +13,9 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: 10008092 mov x16, #-1 +;; 18: f07f40b2 orr x16, xzr, #0xffffffff ;; 1c: e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 0060300b add w0, w0, w16, uxtx ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git a/winch/filetests/filetests/aarch64/i32_mul/max.wat b/winch/filetests/filetests/aarch64/i32_mul/max.wat index 443ca5c15a8d..28243041d81e 100644 --- a/winch/filetests/filetests/aarch64/i32_mul/max.wat +++ b/winch/filetests/filetests/aarch64/i32_mul/max.wat @@ -15,7 +15,7 @@ ;; 14: 890300f8 stur x9, [x28] ;; 18: f07b40b2 orr x16, xzr, #0x7fffffff ;; 1c: e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 007c101b mul w0, w0, w16 ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git 
a/winch/filetests/filetests/aarch64/i32_mul/max_one.wat b/winch/filetests/filetests/aarch64/i32_mul/max_one.wat index 2f9b9737ac73..e8c9316183bd 100644 --- a/winch/filetests/filetests/aarch64/i32_mul/max_one.wat +++ b/winch/filetests/filetests/aarch64/i32_mul/max_one.wat @@ -13,9 +13,9 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: f08361b2 orr x16, xzr, #0xffffffff80000000 +;; 18: 1000b0d2 mov x16, #0x80000000 ;; 1c: e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 007c101b mul w0, w0, w16 ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git a/winch/filetests/filetests/aarch64/i32_mul/mixed.wat b/winch/filetests/filetests/aarch64/i32_mul/mixed.wat index f9d6f9b1f12f..dc6c1ea4a887 100644 --- a/winch/filetests/filetests/aarch64/i32_mul/mixed.wat +++ b/winch/filetests/filetests/aarch64/i32_mul/mixed.wat @@ -13,7 +13,7 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: 10008092 mov x16, #-1 +;; 18: f07f40b2 orr x16, xzr, #0xffffffff ;; 1c: e003102a mov w0, w16 ;; 20: 300080d2 mov x16, #1 ;; 24: 007c101b mul w0, w0, w16 diff --git a/winch/filetests/filetests/aarch64/i32_mul/signed.wat b/winch/filetests/filetests/aarch64/i32_mul/signed.wat index d4e4d48fa631..1f1e1cff663b 100644 --- a/winch/filetests/filetests/aarch64/i32_mul/signed.wat +++ b/winch/filetests/filetests/aarch64/i32_mul/signed.wat @@ -13,9 +13,9 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: 10008092 mov x16, #-1 +;; 18: f07f40b2 orr x16, xzr, #0xffffffff ;; 1c: e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 007c101b mul w0, w0, w16 ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git a/winch/filetests/filetests/aarch64/i32_sub/max.wat b/winch/filetests/filetests/aarch64/i32_sub/max.wat index 
499b86911b3f..75e061ff5124 100644 --- a/winch/filetests/filetests/aarch64/i32_sub/max.wat +++ b/winch/filetests/filetests/aarch64/i32_sub/max.wat @@ -14,7 +14,7 @@ ;; 14: 890300f8 stur x9, [x28] ;; 18: f07b40b2 orr x16, xzr, #0x7fffffff ;; 1c: e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 0060304b sub w0, w0, w16, uxtx ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git a/winch/filetests/filetests/aarch64/i32_sub/max_one.wat b/winch/filetests/filetests/aarch64/i32_sub/max_one.wat index fdcfe6707890..fe9a1440c121 100644 --- a/winch/filetests/filetests/aarch64/i32_sub/max_one.wat +++ b/winch/filetests/filetests/aarch64/i32_sub/max_one.wat @@ -13,7 +13,7 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: f08361b2 orr x16, xzr, #0xffffffff80000000 +;; 18: 1000b0d2 mov x16, #0x80000000 ;; 1c: e003102a mov w0, w16 ;; 20: 00040051 sub w0, w0, #1 ;; 24: ff230091 add sp, sp, #8 diff --git a/winch/filetests/filetests/aarch64/i32_sub/mixed.wat b/winch/filetests/filetests/aarch64/i32_sub/mixed.wat index 6482bed6e4ab..68f42a2ca013 100644 --- a/winch/filetests/filetests/aarch64/i32_sub/mixed.wat +++ b/winch/filetests/filetests/aarch64/i32_sub/mixed.wat @@ -13,7 +13,7 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: 10008092 mov x16, #-1 +;; 18: f07f40b2 orr x16, xzr, #0xffffffff ;; 1c: e003102a mov w0, w16 ;; 20: 00040051 sub w0, w0, #1 ;; 24: ff230091 add sp, sp, #8 diff --git a/winch/filetests/filetests/aarch64/i32_sub/signed.wat b/winch/filetests/filetests/aarch64/i32_sub/signed.wat index b201255faddd..d54e87a526e8 100644 --- a/winch/filetests/filetests/aarch64/i32_sub/signed.wat +++ b/winch/filetests/filetests/aarch64/i32_sub/signed.wat @@ -13,9 +13,9 @@ ;; c: ff2300d1 sub sp, sp, #8 ;; 10: fc030091 mov x28, sp ;; 14: 890300f8 stur x9, [x28] -;; 18: 10008092 mov x16, #-1 +;; 18: f07f40b2 orr x16, xzr, 
#0xffffffff ;; 1c: e003102a mov w0, w16 -;; 20: 10008092 mov x16, #-1 +;; 20: f07f40b2 orr x16, xzr, #0xffffffff ;; 24: 0060304b sub w0, w0, w16, uxtx ;; 28: ff230091 add sp, sp, #8 ;; 2c: fc030091 mov x28, sp diff --git a/winch/filetests/filetests/x64/br/as_call_all.wat b/winch/filetests/filetests/x64/br/as_call_all.wat index 2cad4fb1051a..bf2305740f4b 100644 --- a/winch/filetests/filetests/x64/br/as_call_all.wat +++ b/winch/filetests/filetests/x64/br/as_call_all.wat @@ -12,10 +12,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br/as_call_first.wat b/winch/filetests/filetests/x64/br/as_call_first.wat index 1fd377779dfe..083c2d5d0f00 100644 --- a/winch/filetests/filetests/x64/br/as_call_first.wat +++ b/winch/filetests/filetests/x64/br/as_call_first.wat @@ -15,10 +15,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br/as_call_last.wat b/winch/filetests/filetests/x64/br/as_call_last.wat index 9ff34fcd3d3a..b1844f42dd4d 100644 --- a/winch/filetests/filetests/x64/br/as_call_last.wat +++ b/winch/filetests/filetests/x64/br/as_call_last.wat @@ -14,10 +14,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr 
[rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br/as_call_mid.wat b/winch/filetests/filetests/x64/br/as_call_mid.wat index eb822c163459..cbd7093062d9 100644 --- a/winch/filetests/filetests/x64/br/as_call_mid.wat +++ b/winch/filetests/filetests/x64/br/as_call_mid.wat @@ -15,10 +15,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br_if/as_call_first.wat b/winch/filetests/filetests/x64/br_if/as_call_first.wat index 958e1bdc40ef..a46a3fba7a0c 100644 --- a/winch/filetests/filetests/x64/br_if/as_call_first.wat +++ b/winch/filetests/filetests/x64/br_if/as_call_first.wat @@ -16,10 +16,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br_if/as_call_last.wat b/winch/filetests/filetests/x64/br_if/as_call_last.wat index ad29110065e8..e618a29955eb 100644 --- 
a/winch/filetests/filetests/x64/br_if/as_call_last.wat +++ b/winch/filetests/filetests/x64/br_if/as_call_last.wat @@ -16,10 +16,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br_if/as_call_mid.wat b/winch/filetests/filetests/x64/br_if/as_call_mid.wat index f922798fc80f..37362e35980e 100644 --- a/winch/filetests/filetests/x64/br_if/as_call_mid.wat +++ b/winch/filetests/filetests/x64/br_if/as_call_mid.wat @@ -16,10 +16,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/br_if/as_local_set_value.wat b/winch/filetests/filetests/x64/br_if/as_local_set_value.wat index e82836354687..bf8108f8a69e 100644 --- a/winch/filetests/filetests/x64/br_if/as_local_set_value.wat +++ b/winch/filetests/filetests/x64/br_if/as_local_set_value.wat @@ -16,9 +16,10 @@ ;; 10: 8b4c240c mov ecx, dword ptr [rsp + 0xc] ;; 14: 48c7c011000000 mov rax, 0x11 ;; 1b: 85c9 test ecx, ecx -;; 1d: 0f850b000000 jne 0x2e +;; 1d: 0f850e000000 jne 0x31 ;; 23: 8944240c mov dword ptr [rsp + 0xc], eax -;; 27: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 2e: 4883c410 add rsp, 0x10 -;; 32: 5d pop rbp -;; 33: c3 ret +;; 27: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 31: 
4883c410 add rsp, 0x10 +;; 35: 5d pop rbp +;; 36: c3 ret diff --git a/winch/filetests/filetests/x64/if/as_binop.wat b/winch/filetests/filetests/x64/if/as_binop.wat index ae9008c0f4f1..e9d64094aaa2 100644 --- a/winch/filetests/filetests/x64/if/as_binop.wat +++ b/winch/filetests/filetests/x64/if/as_binop.wat @@ -34,25 +34,27 @@ ;; 1a: 0f8411000000 je 0x31 ;; 20: e800000000 call 0x25 ;; 25: 48c7c003000000 mov rax, 3 -;; 2c: e90c000000 jmp 0x3d +;; 2c: e90f000000 jmp 0x40 ;; 31: e800000000 call 0x36 -;; 36: 48c7c0fdffffff mov rax, 0xfffffffffffffffd -;; 3d: 8b4c2408 mov ecx, dword ptr [rsp + 8] -;; 41: 50 push rax -;; 42: 85c9 test ecx, ecx -;; 44: 0f8419000000 je 0x63 -;; 4a: 4883ec08 sub rsp, 8 -;; 4e: e800000000 call 0x53 -;; 53: 4883c408 add rsp, 8 -;; 57: 48c7c004000000 mov rax, 4 -;; 5e: e914000000 jmp 0x77 -;; 63: 4883ec08 sub rsp, 8 -;; 67: e800000000 call 0x6c -;; 6c: 4883c408 add rsp, 8 -;; 70: 48c7c0fbffffff mov rax, 0xfffffffffffffffb -;; 77: 59 pop rcx -;; 78: 0fafc8 imul ecx, eax -;; 7b: 4889c8 mov rax, rcx -;; 7e: 4883c410 add rsp, 0x10 -;; 82: 5d pop rbp -;; 83: c3 ret +;; 36: 48b8fdffffff00000000 +;; movabs rax, 0xfffffffd +;; 40: 8b4c2408 mov ecx, dword ptr [rsp + 8] +;; 44: 50 push rax +;; 45: 85c9 test ecx, ecx +;; 47: 0f8419000000 je 0x66 +;; 4d: 4883ec08 sub rsp, 8 +;; 51: e800000000 call 0x56 +;; 56: 4883c408 add rsp, 8 +;; 5a: 48c7c004000000 mov rax, 4 +;; 61: e917000000 jmp 0x7d +;; 66: 4883ec08 sub rsp, 8 +;; 6a: e800000000 call 0x6f +;; 6f: 4883c408 add rsp, 8 +;; 73: 48b8fbffffff00000000 +;; movabs rax, 0xfffffffb +;; 7d: 59 pop rcx +;; 7e: 0fafc8 imul ecx, eax +;; 81: 4889c8 mov rax, rcx +;; 84: 4883c410 add rsp, 0x10 +;; 88: 5d pop rbp +;; 89: c3 ret diff --git a/winch/filetests/filetests/x64/return/as_call_fist.wat b/winch/filetests/filetests/x64/return/as_call_fist.wat index d298ffcb66b7..d301d88d388d 100644 --- a/winch/filetests/filetests/x64/return/as_call_fist.wat +++ b/winch/filetests/filetests/x64/return/as_call_fist.wat @@ -13,10 
+13,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/return/as_call_last.wat b/winch/filetests/filetests/x64/return/as_call_last.wat index aebfe4d00482..c0ed53182131 100644 --- a/winch/filetests/filetests/x64/return/as_call_last.wat +++ b/winch/filetests/filetests/x64/return/as_call_last.wat @@ -13,10 +13,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp diff --git a/winch/filetests/filetests/x64/return/as_call_mid.wat b/winch/filetests/filetests/x64/return/as_call_mid.wat index 2af82fed451b..0b0642ff6680 100644 --- a/winch/filetests/filetests/x64/return/as_call_mid.wat +++ b/winch/filetests/filetests/x64/return/as_call_mid.wat @@ -13,10 +13,11 @@ ;; c: 89742410 mov dword ptr [rsp + 0x10], esi ;; 10: 8954240c mov dword ptr [rsp + 0xc], edx ;; 14: 4c89742404 mov qword ptr [rsp + 4], r14 -;; 19: 48c7c0ffffffff mov rax, 0xffffffffffffffff -;; 20: 4883c418 add rsp, 0x18 -;; 24: 5d pop rbp -;; 25: c3 ret +;; 19: 48b8ffffffff00000000 +;; movabs rax, 0xffffffff +;; 23: 4883c418 add rsp, 0x18 +;; 27: 5d pop rbp +;; 28: c3 ret ;; ;; 0: 55 push rbp ;; 1: 4889e5 mov rbp, rsp