diff --git a/cranelift/codegen/src/ir/memflags.rs b/cranelift/codegen/src/ir/memflags.rs index 546e11fa3703..781e1da90f15 100644 --- a/cranelift/codegen/src/ir/memflags.rs +++ b/cranelift/codegen/src/ir/memflags.rs @@ -67,27 +67,25 @@ pub struct MemFlags { impl MemFlags { /// Create a new empty set of flags. - pub fn new() -> Self { + pub const fn new() -> Self { Self { bits: 0 } } /// Create a set of flags representing an access from a "trusted" address, meaning it's /// known to be aligned and non-trapping. - pub fn trusted() -> Self { - let mut result = Self::new(); - result.set_notrap(); - result.set_aligned(); - result + pub const fn trusted() -> Self { + Self::new().with_notrap().with_aligned() } /// Read a flag bit. - fn read(self, bit: FlagBit) -> bool { + const fn read(self, bit: FlagBit) -> bool { self.bits & (1 << bit as usize) != 0 } - /// Set a flag bit. - fn set(&mut self, bit: FlagBit) { - self.bits |= 1 << bit as usize + /// Return a new `MemFlags` with this flag bit set. + const fn with(mut self, bit: FlagBit) -> Self { + self.bits |= 1 << bit as usize; + self } /// Set a flag bit by name. @@ -116,7 +114,7 @@ impl MemFlags { /// endianness otherwise. The native endianness has to be provided by the /// caller since it is not explicitly encoded in CLIF IR -- this allows a /// front end to create IR without having to know the target endianness. - pub fn endianness(self, native_endianness: Endianness) -> Endianness { + pub const fn endianness(self, native_endianness: Endianness) -> Endianness { if self.read(FlagBit::LittleEndian) { Endianness::Little } else if self.read(FlagBit::BigEndian) { @@ -128,17 +126,16 @@ impl MemFlags { /// Set endianness of the memory access. 
pub fn set_endianness(&mut self, endianness: Endianness) { - match endianness { - Endianness::Little => self.set(FlagBit::LittleEndian), - Endianness::Big => self.set(FlagBit::BigEndian), - }; + *self = self.with_endianness(endianness); assert!(!(self.read(FlagBit::LittleEndian) && self.read(FlagBit::BigEndian))); } /// Set endianness of the memory access, returning new flags. - pub fn with_endianness(mut self, endianness: Endianness) -> Self { - self.set_endianness(endianness); - self + pub const fn with_endianness(self, endianness: Endianness) -> Self { + match endianness { + Endianness::Little => self.with(FlagBit::LittleEndian), + Endianness::Big => self.with(FlagBit::BigEndian), + } } /// Test if the `notrap` flag is set. @@ -150,19 +147,18 @@ impl MemFlags { /// The `notrap` flag tells Cranelift that the memory is *accessible*, which means that /// accesses will not trap. This makes it possible to delete an unused load or a dead store /// instruction. - pub fn notrap(self) -> bool { + pub const fn notrap(self) -> bool { self.read(FlagBit::Notrap) } /// Set the `notrap` flag. pub fn set_notrap(&mut self) { - self.set(FlagBit::Notrap) + *self = self.with_notrap(); } /// Set the `notrap` flag, returning new flags. - pub fn with_notrap(mut self) -> Self { - self.set_notrap(); - self + pub const fn with_notrap(self) -> Self { + self.with(FlagBit::Notrap) } /// Test if the `aligned` flag is set. @@ -170,19 +166,18 @@ impl MemFlags { /// By default, Cranelift memory instructions work with any unaligned effective address. If the /// `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the /// effective address is misaligned. - pub fn aligned(self) -> bool { + pub const fn aligned(self) -> bool { self.read(FlagBit::Aligned) } /// Set the `aligned` flag. pub fn set_aligned(&mut self) { - self.set(FlagBit::Aligned) + *self = self.with_aligned(); } /// Set the `aligned` flag, returning new flags. 
- pub fn with_aligned(mut self) -> Self { - self.set_aligned(); - self + pub const fn with_aligned(self) -> Self { + self.with(FlagBit::Aligned) } /// Test if the `readonly` flag is set. @@ -190,19 +185,18 @@ impl MemFlags { /// Loads with this flag have no memory dependencies. /// This results in undefined behavior if the dereferenced memory is mutated at any time /// between when the function is called and when it is exited. - pub fn readonly(self) -> bool { + pub const fn readonly(self) -> bool { self.read(FlagBit::Readonly) } /// Set the `readonly` flag. pub fn set_readonly(&mut self) { - self.set(FlagBit::Readonly) + *self = self.with_readonly(); } /// Set the `readonly` flag, returning new flags. - pub fn with_readonly(mut self) -> Self { - self.set_readonly(); - self + pub const fn with_readonly(self) -> Self { + self.with(FlagBit::Readonly) } /// Test if the `heap` bit is set. @@ -213,7 +207,7 @@ impl MemFlags { /// means that behavior is undefined if the same memory is also /// accessed by another load/store with one of the other /// alias-analysis bits (`table`, `vmctx`) set, or `heap` not set. - pub fn heap(self) -> bool { + pub const fn heap(self) -> bool { self.read(FlagBit::Heap) } @@ -221,13 +215,12 @@ impl MemFlags { /// other bits in `heap()`. pub fn set_heap(&mut self) { assert!(!self.table() && !self.vmctx()); - self.set(FlagBit::Heap); + *self = self.with_heap(); } /// Set the `heap` bit, returning new flags. - pub fn with_heap(mut self) -> Self { - self.set_heap(); - self + pub const fn with_heap(self) -> Self { + self.with(FlagBit::Heap) } /// Test if the `table` bit is set. @@ -238,7 +231,7 @@ impl MemFlags { /// means that behavior is undefined if the same memory is also /// accessed by another load/store with one of the other /// alias-analysis bits (`heap`, `vmctx`) set, or `table` not set. 
- pub fn table(self) -> bool { + pub const fn table(self) -> bool { self.read(FlagBit::Table) } @@ -246,13 +239,12 @@ impl MemFlags { /// other bits in `table()`. pub fn set_table(&mut self) { assert!(!self.heap() && !self.vmctx()); - self.set(FlagBit::Table); + *self = self.with_table(); } /// Set the `table` bit, returning new flags. - pub fn with_table(mut self) -> Self { - self.set_table(); - self + pub const fn with_table(self) -> Self { + self.with(FlagBit::Table) } /// Test if the `vmctx` bit is set. @@ -263,7 +255,7 @@ impl MemFlags { /// means that behavior is undefined if the same memory is also /// accessed by another load/store with one of the other /// alias-analysis bits (`heap`, `table`) set, or `vmctx` not set. - pub fn vmctx(self) -> bool { + pub const fn vmctx(self) -> bool { self.read(FlagBit::Vmctx) } @@ -271,13 +263,12 @@ impl MemFlags { /// other bits in `vmctx()`. pub fn set_vmctx(&mut self) { assert!(!self.heap() && !self.table()); - self.set(FlagBit::Vmctx); + *self = self.with_vmctx(); } /// Set the `vmctx` bit, returning new flags. - pub fn with_vmctx(mut self) -> Self { - self.set_vmctx(); - self + pub const fn with_vmctx(self) -> Self { + self.with(FlagBit::Vmctx) } /// Test if the `checked` bit is set. @@ -291,19 +282,18 @@ impl MemFlags { /// `checked`-marked memory accesses are guaranteed (up to the /// checker's correctness) to access valid memory. This can be /// used to ensure memory safety and sandboxing. - pub fn checked(self) -> bool { + pub const fn checked(self) -> bool { self.read(FlagBit::Checked) } /// Set the `checked` bit. pub fn set_checked(&mut self) { - self.set(FlagBit::Checked); + *self = self.with_checked(); } /// Set the `checked` bit, returning new flags. 
- pub fn with_checked(mut self) -> Self { - self.set_checked(); - self + pub const fn with_checked(self) -> Self { + self.with(FlagBit::Checked) } } diff --git a/winch/codegen/src/isa/x64/masm.rs b/winch/codegen/src/isa/x64/masm.rs index 673c622151b1..eddd4739609f 100644 --- a/winch/codegen/src/isa/x64/masm.rs +++ b/winch/codegen/src/isa/x64/masm.rs @@ -7,7 +7,7 @@ use super::{ use crate::masm::{ DivKind, ExtendKind, FloatCmpKind, Imm as I, IntCmpKind, MacroAssembler as Masm, OperandSize, - RegImm, RemKind, RoundingMode, ShiftKind, TrapCode, + RegImm, RemKind, RoundingMode, ShiftKind, TrapCode, TRUSTED_FLAGS, UNTRUSTED_FLAGS, }; use crate::{ abi::ABI, @@ -24,7 +24,7 @@ use crate::{ masm::CalleeKind, }; use cranelift_codegen::{ - ir::{Endianness, MemFlags}, + ir::MemFlags, isa::unwind::UnwindInst, isa::x64::{ args::{ExtMode, CC}, @@ -47,10 +47,6 @@ pub(crate) struct MacroAssembler { shared_flags: settings::Flags, /// The target pointer size. ptr_size: OperandSize, - /// Flags for native loads/stores. - trusted_flags: MemFlags, - /// Flags for Wasm loads/stores. 
- untrusted_flags: MemFlags, } impl Masm for MacroAssembler { @@ -114,24 +110,16 @@ impl Masm for MacroAssembler { let bytes = size.bytes(); self.reserve_stack(bytes); let sp_offset = SPOffset::from_u32(self.sp_offset); - self.asm.mov_rm( - reg, - &self.address_from_sp(sp_offset), - size, - self.trusted_flags, - ); + self.asm + .mov_rm(reg, &self.address_from_sp(sp_offset), size, TRUSTED_FLAGS); bytes } (RegClass::Float, _) => { let bytes = size.bytes(); self.reserve_stack(bytes); let sp_offset = SPOffset::from_u32(self.sp_offset); - self.asm.xmm_mov_rm( - reg, - &self.address_from_sp(sp_offset), - size, - self.trusted_flags, - ); + self.asm + .xmm_mov_rm(reg, &self.address_from_sp(sp_offset), size, TRUSTED_FLAGS); bytes } _ => unreachable!(), @@ -212,7 +200,7 @@ impl Masm for MacroAssembler { &self.address_at_vmctx(offset), ptr_base, self.ptr_size.into(), - self.trusted_flags, + TRUSTED_FLAGS, ); } else { // Else, simply move the vmctx register into the addr register as @@ -224,7 +212,7 @@ impl Masm for MacroAssembler { let bound_addr = self.address_at_reg(ptr_base, table_data.current_elems_offset); let bound_size = table_data.current_elements_size; self.asm - .movzx_mr(&bound_addr, bound, bound_size.into(), self.trusted_flags); + .movzx_mr(&bound_addr, bound, bound_size.into(), TRUSTED_FLAGS); self.asm.cmp_rr(bound, index, bound_size); self.asm.trapif(IntCmpKind::GeU, TrapCode::TableOutOfBounds); @@ -242,7 +230,7 @@ impl Masm for MacroAssembler { &self.address_at_reg(ptr_base, table_data.offset), ptr_base, self.ptr_size.into(), - self.trusted_flags, + TRUSTED_FLAGS, ); // Copy the value of the table base into a temporary register // so that we can use it later in case of a misspeculation. 
@@ -270,7 +258,7 @@ impl Masm for MacroAssembler { &self.address_at_vmctx(offset), scratch, self.ptr_size.into(), - self.trusted_flags, + TRUSTED_FLAGS, ); } else { self.asm.mov_rr(vmctx, scratch, self.ptr_size); @@ -281,7 +269,7 @@ impl Masm for MacroAssembler { &size_addr, size, table_data.current_elements_size.into(), - self.trusted_flags, + TRUSTED_FLAGS, ); context.stack.push(TypedReg::i32(size).into()); @@ -297,7 +285,7 @@ impl Masm for MacroAssembler { &self.address_at_vmctx(offset), scratch, self.ptr_size.into(), - self.trusted_flags, + TRUSTED_FLAGS, ); scratch } else { @@ -305,12 +293,8 @@ impl Masm for MacroAssembler { }; let size_addr = Address::offset(base, heap_data.current_length_offset); - self.asm.movzx_mr( - &size_addr, - size_reg, - self.ptr_size.into(), - self.trusted_flags, - ); + self.asm + .movzx_mr(&size_addr, size_reg, self.ptr_size.into(), TRUSTED_FLAGS); // Prepare the stack to emit a shift to get the size in pages rather // than in bytes. context @@ -351,11 +335,11 @@ impl Masm for MacroAssembler { } fn store(&mut self, src: RegImm, dst: Address, size: OperandSize) { - self.store_impl(src, dst, size, self.trusted_flags); + self.store_impl(src, dst, size, TRUSTED_FLAGS); } fn wasm_store(&mut self, src: Reg, dst: Self::Address, size: OperandSize) { - self.store_impl(src.into(), dst, size, self.untrusted_flags); + self.store_impl(src.into(), dst, size, UNTRUSTED_FLAGS); } fn pop(&mut self, dst: Reg, size: OperandSize) { @@ -363,8 +347,7 @@ impl Masm for MacroAssembler { match (dst.class(), size) { (RegClass::Int, OperandSize::S32) => { let addr = self.address_from_sp(current_sp); - self.asm - .movzx_mr(&addr, dst, size.into(), self.trusted_flags); + self.asm.movzx_mr(&addr, dst, size.into(), TRUSTED_FLAGS); self.free_stack(size.bytes()); } (RegClass::Int, OperandSize::S64) => { @@ -373,7 +356,7 @@ impl Masm for MacroAssembler { } (RegClass::Float, _) => { let addr = self.address_from_sp(current_sp); - self.asm.xmm_mov_mr(&addr, dst, size, 
self.trusted_flags); + self.asm.xmm_mov_mr(&addr, dst, size, TRUSTED_FLAGS); self.free_stack(size.bytes()); } _ => unreachable!(), @@ -409,7 +392,7 @@ impl Masm for MacroAssembler { } fn load(&mut self, src: Address, dst: Reg, size: OperandSize) { - self.load_impl::(src, dst, size, self.trusted_flags); + self.load_impl::(src, dst, size, TRUSTED_FLAGS); } fn wasm_load( @@ -420,9 +403,9 @@ impl Masm for MacroAssembler { kind: Option, ) { if let Some(ext) = kind { - self.asm.movsx_mr(&src, dst, ext, self.untrusted_flags); + self.asm.movsx_mr(&src, dst, ext, UNTRUSTED_FLAGS); } else { - self.load_impl::(src, dst, size, self.untrusted_flags) + self.load_impl::(src, dst, size, UNTRUSTED_FLAGS) } } @@ -446,11 +429,11 @@ impl Masm for MacroAssembler { I::I64(v) => self.asm.mov_ir(v, dst, size), I::F32(v) => { let addr = self.asm.add_constant(v.to_le_bytes().as_slice()); - self.asm.xmm_mov_mr(&addr, dst, size, self.trusted_flags); + self.asm.xmm_mov_mr(&addr, dst, size, TRUSTED_FLAGS); } I::F64(v) => { let addr = self.asm.add_constant(v.to_le_bytes().as_slice()); - self.asm.xmm_mov_mr(&addr, dst, size, self.trusted_flags); + self.asm.xmm_mov_mr(&addr, dst, size, TRUSTED_FLAGS); } }, } @@ -1152,11 +1135,6 @@ impl MacroAssembler { isa_flags: x64_settings::Flags, ) -> Self { let ptr_type: WasmValType = ptr_type_from_ptr_size(ptr_size.size()).into(); - // Flags used for WebAssembly loads / stores. - // Untrusted by default so we don't set `no_trap`. - // We also ensure that the endianess is the right one for WebAssembly. 
- let mut untrusted_flags = MemFlags::new(); - untrusted_flags.set_endianness(Endianness::Little); Self { sp_offset: 0, @@ -1164,8 +1142,6 @@ impl MacroAssembler { flags: isa_flags, shared_flags, ptr_size: ptr_type.into(), - trusted_flags: MemFlags::trusted(), - untrusted_flags, } } diff --git a/winch/codegen/src/masm.rs b/winch/codegen/src/masm.rs index 0e7339fe3fb7..54b1792c5425 100644 --- a/winch/codegen/src/masm.rs +++ b/winch/codegen/src/masm.rs @@ -1,7 +1,10 @@ use crate::abi::{self, align_to, LocalSlot}; use crate::codegen::{CodeGenContext, HeapData, TableData}; use crate::isa::reg::Reg; -use cranelift_codegen::{ir::LibCall, Final, MachBufferFinalized, MachLabel}; +use cranelift_codegen::{ + ir::{Endianness, LibCall, MemFlags}, + Final, MachBufferFinalized, MachLabel, +}; use std::{fmt::Debug, ops::Range}; use wasmtime_environ::PtrSize; @@ -336,6 +339,14 @@ pub enum RoundingMode { Zero, } +/// Memory flags for trusted loads/stores. +pub const TRUSTED_FLAGS: MemFlags = MemFlags::trusted(); + +/// Flags used for WebAssembly loads / stores. +/// Untrusted by default so we don't set `no_trap`. +/// We also ensure that the endianness is the right one for WebAssembly. +pub const UNTRUSTED_FLAGS: MemFlags = MemFlags::new().with_endianness(Endianness::Little); + /// Generic MacroAssembler interface used by the code generation. /// /// The MacroAssembler trait aims to expose an interface, high-level enough,