diff --git a/compiler/rustc_codegen_cranelift/src/global_asm.rs b/compiler/rustc_codegen_cranelift/src/global_asm.rs index 8d8cdb14dbc6b..ce7e4752c9bde 100644 --- a/compiler/rustc_codegen_cranelift/src/global_asm.rs +++ b/compiler/rustc_codegen_cranelift/src/global_asm.rs @@ -1,6 +1,7 @@ //! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a //! standalone executable. +use std::fmt::Write as _; use std::io::Write; use std::path::PathBuf; use std::process::{Command, Stdio}; @@ -8,6 +9,7 @@ use std::sync::Arc; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::traits::{AsmCodegenMethods, GlobalAsmOperandRef}; +use rustc_middle::mir::interpret::{GlobalAlloc, PointerArithmetic, Scalar as ConstScalar}; use rustc_middle::ty::TyCtxt; use rustc_middle::ty::layout::{ FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, HasTypingEnv, LayoutError, LayoutOfHelpers, @@ -107,21 +109,50 @@ fn codegen_global_asm_inner<'tcx>( InlineAsmTemplatePiece::String(ref s) => global_asm.push_str(s), InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span } => { match operands[operand_idx] { - GlobalAsmOperandRef::Const { ref string } => { - global_asm.push_str(string); - } - GlobalAsmOperandRef::SymFn { instance } => { - if cfg!(not(feature = "inline_asm_sym")) { - tcx.dcx().span_err( - span, - "asm! and global_asm! sym operands are not yet supported", - ); - } + GlobalAsmOperandRef::Const { value, ty, instance: _ } => { + match value { + ConstScalar::Int(int) => { + let string = rustc_codegen_ssa::common::asm_const_to_str( + tcx, + int, + FullyMonomorphizedLayoutCx(tcx).layout_of(ty), + ); + global_asm.push_str(&string); + } - let symbol = tcx.symbol_name(instance); - // FIXME handle the case where the function was made private to the - // current codegen unit - global_asm.push_str(symbol.name); + ConstScalar::Ptr(ptr, _) => { + let (prov, offset) = ptr.prov_and_relative_offset(); + let global_alloc = tcx.global_alloc(prov.alloc_id()); + let symbol_name = match global_alloc { + GlobalAlloc::Function { instance } => { + if cfg!(not(feature = "inline_asm_sym")) { + tcx.dcx().span_err( + span, + "asm! and global_asm! sym operands are not yet supported", + ); + } + + // FIXME handle the case where the function was made private to the + // current codegen unit + tcx.symbol_name(instance) + } + GlobalAlloc::Static(def_id) => { + let instance = Instance::mono(tcx, def_id); + tcx.symbol_name(instance) + } + GlobalAlloc::Memory(_) + | GlobalAlloc::VTable(..) + | GlobalAlloc::TypeId { .. } => unreachable!(), + }; + + global_asm.push_str(symbol_name.name); + + if offset != Size::ZERO { + let offset = tcx.sign_extend_to_target_isize(offset.bytes()); + write!(global_asm, "{offset:+}").unwrap(); + } + } + } } GlobalAsmOperandRef::SymStatic { def_id } => { if cfg!(not(feature = "inline_asm_sym")) { @@ -130,7 +161,6 @@ fn codegen_global_asm_inner<'tcx>( "asm! and global_asm! 
sym operands are not yet supported", ); } - let instance = Instance::mono(tcx, def_id); let symbol = tcx.symbol_name(instance); global_asm.push_str(symbol.name); diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs index 08cabe9d695c3..b1cdd4f1ca59c 100644 --- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs +++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs @@ -94,10 +94,17 @@ pub(crate) fn codegen_inline_asm_terminator<'tcx>( } InlineAsmOperand::Const { ref value } => { let (const_value, ty) = crate::constant::eval_mir_constant(fx, value); + let mir::ConstValue::Scalar(scalar) = const_value else { + span_bug!( + span, + "expected Scalar for promoted asm const, but got {:#?}", + const_value + ) + }; + let value = rustc_codegen_ssa::common::asm_const_to_str( fx.tcx, - span, - const_value, + scalar.assert_scalar_int(), fx.layout_of(ty), ); CInlineAsmOperand::Const { value } diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs index ceb3dd3ffedfc..a5e2bc0fde158 100644 --- a/compiler/rustc_codegen_gcc/src/asm.rs +++ b/compiler/rustc_codegen_gcc/src/asm.rs @@ -1,17 +1,21 @@ // cSpell:ignoreRegExp [afkspqvwy]reg use std::borrow::Cow; +use std::fmt::Write; use gccjit::{LValue, RValue, ToRValue, Type}; +use rustc_abi::Size; use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::mir::operand::OperandValue; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::{ AsmBuilderMethods, AsmCodegenMethods, BaseTypeCodegenMethods, BuilderMethods, - GlobalAsmOperandRef, InlineAsmOperandRef, + ConstCodegenMethods, GlobalAsmOperandRef, InlineAsmOperandRef, }; use rustc_middle::bug; +use rustc_middle::mir::interpret::{GlobalAlloc, PointerArithmetic, Scalar}; use rustc_middle::ty::Instance; +use rustc_middle::ty::layout::LayoutOf; use rustc_span::Span; use rustc_target::asm::*; @@ -143,6 +147,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // Clobbers collected from `out("explicit register") _` and `inout("explicit_reg") var => _` let mut clobbers = vec![]; + // Symbols name that needs to be inserted to asm const ptr template string. + let mut const_syms = vec![]; + // We're trying to preallocate space for the template let mut constants_len = 0; @@ -303,16 +310,11 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { } } - InlineAsmOperandRef::Const { ref string } => { - constants_len += string.len() + att_dialect as usize; + InlineAsmOperandRef::Const { .. } => { + // We don't know the size at this point, just some estimate. + constants_len += 20; } - InlineAsmOperandRef::SymFn { instance } => { - // TODO(@Amanieu): Additional mangling is needed on - // some targets to add a leading underscore (Mach-O) - // or byte count suffixes (x86 Windows). - constants_len += self.tcx.symbol_name(instance).name.len(); - } InlineAsmOperandRef::SymStatic { def_id } => { // TODO(@Amanieu): Additional mangling is needed on // some targets to add a leading underscore (Mach-O). 
@@ -402,24 +404,22 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { // processed in the previous pass } - InlineAsmOperandRef::SymFn { instance } => { - inputs.push(AsmInOperand { - constraint: "X".into(), - rust_idx, - val: get_fn(self.cx, instance).get_address(None), - }); - } + InlineAsmOperandRef::Const { value, ty: _, instance } => match value { + Scalar::Int(_) => (), + Scalar::Ptr(ptr, _) => { + let (prov, _) = ptr.prov_and_relative_offset(); + let global_alloc = self.tcx.global_alloc(prov.alloc_id()); + let (val, sym) = self.cx.alloc_to_backend(global_alloc, instance).unwrap(); + const_syms.push(sym.unwrap()); + inputs.push(AsmInOperand { constraint: "X".into(), rust_idx, val }); + } + }, InlineAsmOperandRef::SymStatic { def_id } => { - inputs.push(AsmInOperand { - constraint: "X".into(), - rust_idx, - val: self.cx.get_static(def_id).get_address(None), - }); - } - - InlineAsmOperandRef::Const { .. } => { - // processed in the previous pass + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (MachO). + constants_len += + self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len(); } InlineAsmOperandRef::Label { .. } => { @@ -495,12 +495,33 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { push_to_template(modifier, gcc_index); } - InlineAsmOperandRef::SymFn { instance } => { - // TODO(@Amanieu): Additional mangling is needed on - // some targets to add a leading underscore (Mach-O) - // or byte count suffixes (x86 Windows). - let name = self.tcx.symbol_name(instance).name; - template_str.push_str(name); + InlineAsmOperandRef::Const { value, ty, instance: _ } => { + match value { + Scalar::Int(int) => { + // Const operands get injected directly into the template + let string = rustc_codegen_ssa::common::asm_const_to_str( + self.tcx, + int, + self.layout_of(ty), + ); + template_str.push_str(&string); + } + + Scalar::Ptr(ptr, _) => { + let (_, offset) = ptr.prov_and_relative_offset(); + let instance = const_syms.remove(0); + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O) + // or byte count suffixes (x86 Windows). + template_str.push_str(self.tcx.symbol_name(instance).name); + + if offset != Size::ZERO { + let offset = + self.sign_extend_to_target_isize(offset.bytes()); + write!(template_str, "{offset:+}").unwrap(); + } + } + } } InlineAsmOperandRef::SymStatic { def_id } => { @@ -511,10 +532,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { template_str.push_str(name); } - InlineAsmOperandRef::Const { ref string } => { - template_str.push_str(string); - } - InlineAsmOperandRef::Label { label } => { let label_gcc_index = labels.iter().position(|&l| l == label).expect("wrong rust index"); @@ -893,23 +910,52 @@ impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => { match operands[operand_idx] { - GlobalAsmOperandRef::Const { ref string } => { - // Const operands get injected directly into the - // template. Note that we don't need to escape % - // here unlike normal inline assembly. - template_str.push_str(string); - } + GlobalAsmOperandRef::Const { value, ty, instance } => { + match value { + Scalar::Int(int) => { + // Const operands get injected directly into the + // template. Note that we don't need to escape % + // here unlike normal inline assembly. 
+ let string = rustc_codegen_ssa::common::asm_const_to_str( + self.tcx, + int, + self.layout_of(ty), + ); + template_str.push_str(&string); + } - GlobalAsmOperandRef::SymFn { instance } => { - let function = get_fn(self, instance); - self.add_used_function(function); - // TODO(@Amanieu): Additional mangling is needed on - // some targets to add a leading underscore (Mach-O) - // or byte count suffixes (x86 Windows). - let name = self.tcx.symbol_name(instance).name; - template_str.push_str(name); + Scalar::Ptr(ptr, _) => { + let (prov, offset) = ptr.prov_and_relative_offset(); + let global_alloc = self.tcx.global_alloc(prov.alloc_id()); + let symbol_name = match global_alloc { + GlobalAlloc::Function { instance } => { + let function = get_fn(self, instance); + self.add_used_function(function); + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O) + // or byte count suffixes (x86 Windows). + self.tcx.symbol_name(instance) + } + _ => { + let (_, syms) = self + .alloc_to_backend(global_alloc, instance) + .unwrap(); + // TODO(antoyo): set the global variable as used. + // TODO(@Amanieu): Additional mangling is needed on + // some targets to add a leading underscore (Mach-O). + self.tcx.symbol_name(syms.unwrap()) + } + }; + template_str.push_str(symbol_name.name); + + if offset != Size::ZERO { + let offset = + self.sign_extend_to_target_isize(offset.bytes()); + write!(template_str, "{offset:+}").unwrap(); + } + } + } } - GlobalAsmOperandRef::SymStatic { def_id } => { // TODO(antoyo): set the global variable as used. // TODO(@Amanieu): Additional mangling is needed on diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 7c2969e587186..448c2af7eac9a 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -1,4 +1,4 @@ -use gccjit::{LValue, RValue, ToRValue, Type}; +use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type}; use rustc_abi::Primitive::Pointer; use rustc_abi::{self as abi, HasDataLayout}; use rustc_codegen_ssa::traits::{ @@ -6,6 +6,7 @@ use rustc_codegen_ssa::traits::{ }; use rustc_middle::mir::Mutability; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar}; +use rustc_middle::ty::Instance; use rustc_middle::ty::layout::LayoutOf; use crate::context::CodegenCx; @@ -109,7 +110,7 @@ pub fn type_is_pointer(typ: Type<'_>) -> bool { typ.get_pointee().is_some() } -impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { +impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> { if type_is_pointer(typ) { self.context.new_null(typ) } else { self.const_int(typ, 0) } } @@ -220,6 +221,76 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { None } + fn alloc_to_backend( + &self, + global_alloc: GlobalAlloc<'tcx>, + name_hint: Option>, + ) -> Result<(RValue<'gcc>, Option>), u64> { + let alloc = match global_alloc { + GlobalAlloc::Function { instance, .. } => { + return Ok((self.get_fn_addr(instance), Some(instance))); + } + GlobalAlloc::Static(def_id) => { + assert!(self.tcx.is_static(def_id)); + return Ok(( + self.get_static(def_id).get_address(None), + Some(Instance::mono(self.tcx, def_id)), + )); + } + GlobalAlloc::TypeId { .. } => { + // Drop the provenance, the offset contains the bytes of the hash, so + // just return 0 as base address. 
+ return Err(0); + } + + GlobalAlloc::Memory(alloc) => { + if alloc.inner().len() == 0 { + // For ZSTs directly codegen an aligned pointer. + // This avoids generating a zero-sized constant value and actually needing a + // real address at runtime. + return Err(alloc.inner().align.bytes()); + } + + alloc + } + + GlobalAlloc::VTable(ty, dyn_ty) => { + self.tcx + .global_alloc(self.tcx.vtable_allocation(( + ty, + dyn_ty.principal().map(|principal| { + self.tcx.instantiate_bound_regions_with_erased(principal) + }), + ))) + .unwrap_memory() + } + }; + + if let Some(name) = name_hint { + let sym = self.tcx.symbol_name(name); + + let init = crate::consts::const_alloc_to_gcc_uncached(self, alloc); + let alloc = alloc.inner(); + let typ = self.val_ty(init).get_aligned(alloc.align.bytes()); + + let global = self.declare_global_with_linkage(sym.name, typ, GlobalKind::Exported); + global.global_set_initializer_rvalue(init); + return Ok((global.get_address(None), Some(name))); + } + + let init = self.const_data_from_alloc(alloc); + let alloc = alloc.inner(); + let value = match alloc.mutability { + Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), + _ => self.static_addr_of(init, alloc.align, None), + }; + if !self.sess().fewer_names() { + // TODO(antoyo): set value name. + } + + Ok((value, None)) + } + fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; match cv { @@ -241,56 +312,16 @@ impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { Scalar::Ptr(ptr, _size) => { let (prov, offset) = ptr.prov_and_relative_offset(); let alloc_id = prov.alloc_id(); - let base_addr = match self.tcx.global_alloc(alloc_id) { - GlobalAlloc::Memory(alloc) => { - // For ZSTs directly codegen an aligned pointer. - // This avoids generating a zero-sized constant value and actually needing a - // real address at runtime. - if alloc.inner().len() == 0 { - let val = alloc.inner().align.bytes().wrapping_add(offset.bytes()); - let val = self.const_usize(self.tcx.truncate_to_target_usize(val)); - return if matches!(layout.primitive(), Pointer(_)) { - self.context.new_cast(None, val, ty) - } else { - self.const_bitcast(val, ty) - }; - } - - let init = self.const_data_from_alloc(alloc); - let alloc = alloc.inner(); - let value = match alloc.mutability { - Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), - _ => self.static_addr_of(init, alloc.align, None), + let base_addr = match self.alloc_to_backend(self.tcx.global_alloc(alloc_id), None) { + Ok((base_addr, _)) => base_addr, + Err(base_addr) => { + let val = base_addr.wrapping_add(offset.bytes()); + let val = self.const_usize(self.tcx.truncate_to_target_usize(val)); + return if matches!(layout.primitive(), Pointer(_)) { + self.context.new_cast(None, val, ty) + } else { + self.const_bitcast(val, ty) }; - if !self.sess().fewer_names() { - // TODO(antoyo): set value name. - } - value - } - GlobalAlloc::Function { instance, .. } => self.get_fn_addr(instance), - GlobalAlloc::VTable(ty, dyn_ty) => { - let alloc = self - .tcx - .global_alloc(self.tcx.vtable_allocation(( - ty, - dyn_ty.principal().map(|principal| { - self.tcx.instantiate_bound_regions_with_erased(principal) - }), - ))) - .unwrap_memory(); - let init = self.const_data_from_alloc(alloc); - self.static_addr_of(init, alloc.inner().align, None) - } - GlobalAlloc::TypeId { .. 
} => { - let val = self.const_usize(offset.bytes()); - // This is still a variable of pointer type, even though we only use the provenance - // of that pointer in CTFE and Miri. But to make LLVM's type system happy, - // we need an int-to-ptr cast here (it doesn't matter at all which provenance that picks). - return self.context.new_cast(None, val, ty); - } - GlobalAlloc::Static(def_id) => { - assert!(self.tcx.is_static(def_id)); - self.get_static(def_id).get_address(None) } }; let ptr_type = base_addr.get_type(); diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index ee1b6d45e149d..781339e1b952c 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -1,10 +1,12 @@ use std::assert_matches::assert_matches; +use std::fmt::Write; -use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar}; +use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar, Size}; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::mir::operand::OperandValue; use rustc_codegen_ssa::traits::*; use rustc_data_structures::fx::FxHashMap; +use rustc_middle::mir::interpret::{PointerArithmetic, Scalar as ConstScalar}; use rustc_middle::ty::Instance; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::{bug, span_bug}; @@ -157,11 +159,17 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { constraints.push(format!("{}", op_idx[&idx])); } } - InlineAsmOperandRef::SymFn { instance } => { - inputs.push(self.cx.get_fn(instance)); - op_idx.insert(idx, constraints.len()); - constraints.push("s".to_string()); - } + InlineAsmOperandRef::Const { value, ty: _, instance: _ } => match value { + ConstScalar::Int(_) => (), + ConstScalar::Ptr(ptr, _) => { + let (prov, _) = ptr.prov_and_relative_offset(); + let global_alloc = self.tcx.global_alloc(prov.alloc_id()); + let (value, _) = self.cx.alloc_to_backend(global_alloc, None).unwrap(); + inputs.push(value); + op_idx.insert(idx, constraints.len()); + constraints.push("s".to_string()); + } + }, InlineAsmOperandRef::SymStatic { def_id } => { inputs.push(self.cx.get_static(def_id)); op_idx.insert(idx, constraints.len()); @@ -204,12 +212,33 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx])); } } - InlineAsmOperandRef::Const { ref string } => { - // Const operands get injected directly into the template - template_str.push_str(string); + InlineAsmOperandRef::Const { value, ty, instance: _ } => { + match value { + ConstScalar::Int(int) => { + // Const operands get injected directly into the template + let string = rustc_codegen_ssa::common::asm_const_to_str( + self.tcx, + int, + self.layout_of(ty), + ); + template_str.push_str(&string); + } + ConstScalar::Ptr(ptr, _) => { + let (_, offset) = ptr.prov_and_relative_offset(); + + // Only emit the raw symbol name + template_str + .push_str(&format!("${{{}:c}}", op_idx[&operand_idx])); + + if offset != Size::ZERO { + let offset = + self.sign_extend_to_target_isize(offset.bytes()); + write!(template_str, "{offset:+}").unwrap(); + } + } + } } - InlineAsmOperandRef::SymFn { .. } - | InlineAsmOperandRef::SymStatic { .. } => { + InlineAsmOperandRef::SymStatic { .. 
} => { // Only emit the raw symbol name template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx])); } @@ -402,20 +431,41 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s), InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => { match operands[operand_idx] { - GlobalAsmOperandRef::Const { ref string } => { - // Const operands get injected directly into the - // template. Note that we don't need to escape $ - // here unlike normal inline assembly. - template_str.push_str(string); - } - GlobalAsmOperandRef::SymFn { instance } => { - let llval = self.get_fn(instance); - self.add_compiler_used_global(llval); - let symbol = llvm::build_string(|s| unsafe { - llvm::LLVMRustGetMangledName(llval, s); - }) - .expect("symbol is not valid UTF-8"); - template_str.push_str(&symbol); + GlobalAsmOperandRef::Const { value, ty, instance } => { + match value { + ConstScalar::Int(int) => { + // Const operands get injected directly into the + // template. Note that we don't need to escape $ + // here unlike normal inline assembly. + let string = rustc_codegen_ssa::common::asm_const_to_str( + self.tcx, + int, + self.layout_of(ty), + ); + template_str.push_str(&string); + } + + ConstScalar::Ptr(ptr, _) => { + let (prov, offset) = ptr.prov_and_relative_offset(); + let global_alloc = self.tcx.global_alloc(prov.alloc_id()); + let (llval, sym) = + self.alloc_to_backend(global_alloc, instance).unwrap(); + assert!(sym.is_some()); + + self.add_compiler_used_global(llval); + let symbol = llvm::build_string(|s| unsafe { + llvm::LLVMRustGetMangledName(llval, s); + }) + .expect("symbol is not valid UTF-8"); + template_str.push_str(&symbol); + + if offset != Size::ZERO { + let offset = + self.sign_extend_to_target_isize(offset.bytes()); + write!(template_str, "{offset:+}").unwrap(); + } + } + } } GlobalAsmOperandRef::SymStatic { def_id } => { let llval = self diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index b0cf9925019d2..fe125213c9f51 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -13,7 +13,7 @@ use rustc_hashes::Hash128; use rustc_hir::def_id::DefId; use rustc_middle::bug; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, PointerArithmetic, Scalar}; -use rustc_middle::ty::TyCtxt; +use rustc_middle::ty::{Instance, TyCtxt}; use rustc_session::cstore::DllImport; use tracing::debug; @@ -126,7 +126,7 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { } } -impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) } } @@ -260,6 +260,91 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { }) } + fn alloc_to_backend( + &self, + global_alloc: GlobalAlloc<'tcx>, + name_hint: Option>, + ) -> Result<(Self::Value, Option>), u64> { + let alloc = match global_alloc { + GlobalAlloc::Function { instance, .. } => { + return Ok((self.get_fn_addr(instance), Some(instance))); + } + GlobalAlloc::Static(def_id) => { + assert!(self.tcx.is_static(def_id)); + assert!(!self.tcx.is_thread_local_static(def_id)); + return Ok(( + // `alloc_to_backend` might be called by `global_asm!` codegen. In which case + // `global_asm!` would need to find the renamed statics to use for symbol name. 
+ self.renamed_statics + .borrow() + .get(&def_id) + .copied() + .unwrap_or_else(|| self.get_static(def_id)), + Some(Instance::mono(self.tcx, def_id)), + )); + } + GlobalAlloc::TypeId { .. } => { + // Drop the provenance, the offset contains the bytes of the hash, so + // just return 0 as base address. + return Err(0); + } + + GlobalAlloc::Memory(alloc) => { + if alloc.inner().len() == 0 { + // For ZSTs directly codegen an aligned pointer. + // This avoids generating a zero-sized constant value and actually needing a + // real address at runtime. + return Err(alloc.inner().align.bytes()); + } + + alloc + } + GlobalAlloc::VTable(ty, dyn_ty) => { + self.tcx + .global_alloc(self.tcx.vtable_allocation(( + ty, + dyn_ty.principal().map(|principal| { + self.tcx.instantiate_bound_regions_with_erased(principal) + }), + ))) + .unwrap_memory() + } + }; + + let init = const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); + let alloc = alloc.inner(); + + if let Some(name) = name_hint { + let sym = self.tcx.symbol_name(name); + + // If a hint is provided, always use `static_addr_of_mut`, as `static_addr_of_impl` may + // deduplicate and provide one that doesn't have a desired name. + let value = self.static_addr_of_mut(init, alloc.align, None); + if alloc.mutability.is_not() { + llvm::set_global_constant(value, true); + } + + llvm::set_value_name(value, sym.name.as_bytes()); + llvm::set_linkage(value, llvm::Linkage::InternalLinkage); + return Ok((value, Some(name))); + } + + let value = match alloc.mutability { + Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), + _ => self.static_addr_of_impl(init, alloc.align, None), + }; + if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() { + let hash = self.tcx.with_stable_hashing_context(|mut hcx| { + let mut hasher = StableHasher::new(); + alloc.hash_stable(&mut hcx, &mut hasher); + hasher.finish::() + }); + llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes()); + } + + Ok((value, None)) + } + fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value { let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() }; match cv { @@ -275,68 +360,19 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { Scalar::Ptr(ptr, _size) => { let (prov, offset) = ptr.prov_and_relative_offset(); let global_alloc = self.tcx.global_alloc(prov.alloc_id()); - let base_addr = match global_alloc { - GlobalAlloc::Memory(alloc) => { - // For ZSTs directly codegen an aligned pointer. - // This avoids generating a zero-sized constant value and actually needing a - // real address at runtime. 
- if alloc.inner().len() == 0 { - let val = alloc.inner().align.bytes().wrapping_add(offset.bytes()); - let llval = self.const_usize(self.tcx.truncate_to_target_usize(val)); - return if matches!(layout.primitive(), Pointer(_)) { - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } - } else { - self.const_bitcast(llval, llty) - }; + let base_addr_space = global_alloc.address_space(self); + let base_addr = match self.alloc_to_backend(global_alloc, None) { + Ok((base_addr, _)) => base_addr, + Err(base_addr) => { + let val = base_addr.wrapping_add(offset.bytes()); + let llval = self.const_usize(self.tcx.truncate_to_target_usize(val)); + return if matches!(layout.primitive(), Pointer(_)) { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } } else { - let init = - const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); - let alloc = alloc.inner(); - let value = match alloc.mutability { - Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), - _ => self.static_addr_of_impl(init, alloc.align, None), - }; - if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() - { - let hash = self.tcx.with_stable_hashing_context(|mut hcx| { - let mut hasher = StableHasher::new(); - alloc.hash_stable(&mut hcx, &mut hasher); - hasher.finish::() - }); - llvm::set_value_name( - value, - format!("alloc_{hash:032x}").as_bytes(), - ); - } - value - } - } - GlobalAlloc::Function { instance, .. } => self.get_fn_addr(instance), - GlobalAlloc::VTable(ty, dyn_ty) => { - let alloc = self - .tcx - .global_alloc(self.tcx.vtable_allocation(( - ty, - dyn_ty.principal().map(|principal| { - self.tcx.instantiate_bound_regions_with_erased(principal) - }), - ))) - .unwrap_memory(); - let init = const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); - self.static_addr_of_impl(init, alloc.inner().align, None) - } - GlobalAlloc::Static(def_id) => { - assert!(self.tcx.is_static(def_id)); - assert!(!self.tcx.is_thread_local_static(def_id)); - self.get_static(def_id) - } - GlobalAlloc::TypeId { .. 
} => { - // Drop the provenance, the offset contains the bytes of the hash - let llval = self.const_usize(offset.bytes()); - return unsafe { llvm::LLVMConstIntToPtr(llval, llty) }; + self.const_bitcast(llval, llty) + }; } }; - let base_addr_space = global_alloc.address_space(self); let llval = unsafe { llvm::LLVMConstInBoundsGEP2( self.type_i8(), diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 8ab0b367f08a6..aa187bc8b7aba 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -21,12 +21,12 @@ use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerFile; use rustc_middle::middle::dependency_format::Dependencies; use rustc_middle::middle::exported_symbols::{self, SymbolExportKind}; use rustc_middle::middle::lang_items; -use rustc_middle::mir::BinOp; -use rustc_middle::mir::interpret::ErrorHandled; +use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, ErrorHandled, Scalar}; use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem, MonoItemPartitions}; +use rustc_middle::mir::{BinOp, ConstValue}; use rustc_middle::query::Providers; use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout}; -use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; +use rustc_middle::ty::{self, Instance, Ty, TyCtxt, UintTy}; use rustc_middle::{bug, span_bug}; use rustc_session::Session; use rustc_session::config::{self, CrateType, EntryFnType}; @@ -407,24 +407,36 @@ where .iter() .map(|(op, op_sp)| match *op { rustc_hir::InlineAsmOperand::Const { ref anon_const } => { - match cx.tcx().const_eval_poly(anon_const.def_id.to_def_id()) { + let def_id = anon_const.def_id.to_def_id(); + match cx.tcx().const_eval_poly(def_id) { Ok(const_value) => { let ty = cx.tcx().typeck_body(anon_const.body).node_type(anon_const.hir_id); - let string = common::asm_const_to_str( - cx.tcx(), - *op_sp, - const_value, - cx.layout_of(ty), - ); - GlobalAsmOperandRef::Const { string } + let ConstValue::Scalar(scalar) = const_value else { + span_bug!( + *op_sp, + "expected Scalar for promoted asm const, but got {:#?}", + const_value + ) + }; + GlobalAsmOperandRef::Const { + value: common::asm_const_ptr_clean(cx.tcx(), scalar), + ty, + instance: Some(Instance::new_raw( + def_id, + ty::GenericArgs::identity_for_item(cx.tcx(), def_id), + )), + } } Err(ErrorHandled::Reported { .. }) => { // An error has already been reported and // compilation is guaranteed to fail if execution - // hits this path. So an empty string instead of - // a stringified constant value will suffice. - GlobalAsmOperandRef::Const { string: String::new() } + // hits this path. So anything will suffice. 
+ GlobalAsmOperandRef::Const { + value: Scalar::from_u32(0), + ty: Ty::new_uint(cx.tcx(), UintTy::U32), + instance: None, + } } Err(ErrorHandled::TooGeneric(_)) => { span_bug!(*op_sp, "asm const cannot be resolved; too generic") @@ -444,7 +456,14 @@ where _ => span_bug!(*op_sp, "asm sym is not a function"), }; - GlobalAsmOperandRef::SymFn { instance } + GlobalAsmOperandRef::Const { + value: Scalar::from_pointer( + cx.tcx().reserve_and_set_fn_alloc(instance, CTFE_ALLOC_SALT).into(), + cx, + ), + ty: Ty::new_fn_ptr(cx.tcx(), ty.fn_sig(cx.tcx())), + instance: None, + } } rustc_hir::InlineAsmOperand::SymStatic { path: _, def_id } => { GlobalAsmOperandRef::SymStatic { def_id } diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs index 89a3f8061a8c1..a56f51606b3dd 100644 --- a/compiler/rustc_codegen_ssa/src/common.rs +++ b/compiler/rustc_codegen_ssa/src/common.rs @@ -2,9 +2,10 @@ use rustc_hir::LangItem; use rustc_hir::attrs::PeImportNameType; +use rustc_middle::bug; +use rustc_middle::mir::interpret::{GlobalAlloc, PointerArithmetic, Scalar}; use rustc_middle::ty::layout::TyAndLayout; -use rustc_middle::ty::{self, Instance, TyCtxt}; -use rustc_middle::{bug, mir, span_bug}; +use rustc_middle::ty::{self, Instance, ScalarInt, TyCtxt}; use rustc_session::cstore::{DllCallingConvention, DllImport}; use rustc_span::Span; use rustc_target::spec::{Abi, Env, Os, Target}; @@ -148,14 +149,10 @@ pub(crate) fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( pub fn asm_const_to_str<'tcx>( tcx: TyCtxt<'tcx>, - sp: Span, - const_value: mir::ConstValue, + scalar: ScalarInt, ty_and_layout: TyAndLayout<'tcx>, ) -> String { - let mir::ConstValue::Scalar(scalar) = const_value else { - span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value) - }; - let value = scalar.assert_scalar_int().to_bits(ty_and_layout.size); + let value = scalar.to_bits(ty_and_layout.size); match ty_and_layout.ty.kind() { ty::Uint(_) => value.to_string(), ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) { @@ -166,7 +163,34 @@ pub fn asm_const_to_str<'tcx>( ty::IntTy::I128 => (value as i128).to_string(), ty::IntTy::Isize => unreachable!(), }, - _ => span_bug!(sp, "asm const has bad type {}", ty_and_layout.ty), + // For unsigned integers or pointers without provenance, just print the unsigned value + _ => value.to_string(), + } +} + +/// "Clean" a const pointer by removing values where the resulting ASM will not be +/// ` + `. +/// +/// These values are converted to `ScalarInt`. +pub fn asm_const_ptr_clean<'tcx>(tcx: TyCtxt<'tcx>, scalar: Scalar) -> Scalar { + let Scalar::Ptr(ptr, _) = scalar else { + return scalar; + }; + let (prov, offset) = ptr.prov_and_relative_offset(); + let global_alloc = tcx.global_alloc(prov.alloc_id()); + match global_alloc { + GlobalAlloc::TypeId { .. } => { + // `TypeId` provenances are not a thing in codegen. Just erase and replace with scalar offset. + Scalar::from_u64(offset.bytes()) + } + GlobalAlloc::Memory(alloc) if alloc.inner().len() == 0 => { + // ZST const allocations don't actually get global defined when lowered. + // Turn them into integer without provenances now. + let val = alloc.inner().align.bytes().wrapping_add(offset.bytes()); + Scalar::from_target_usize(tcx.truncate_to_target_usize(val), &tcx) + } + // Other types of `GlobalAlloc` are fine. 
+ _ => scalar, } } diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index cce33107e2c27..b922d0bcb6faf 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -6,7 +6,10 @@ use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_data_structures::packed::Pu128; use rustc_hir::lang_items::LangItem; use rustc_lint_defs::builtin::TAIL_CALL_TRACK_CALLER; -use rustc_middle::mir::{self, AssertKind, InlineAsmMacro, SwitchTargets, UnwindTerminateReason}; +use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, Scalar}; +use rustc_middle::mir::{ + self, AssertKind, Const, InlineAsmMacro, SwitchTargets, UnwindTerminateReason, +}; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement}; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, Instance, Ty}; @@ -1267,14 +1270,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { InlineAsmOperandRef::InOut { reg, late, in_value, out_place } } mir::InlineAsmOperand::Const { ref value } => { + let Const::Unevaluated(c, _) = &value.const_ else { + bug!("need unevaluated const to derive symbol name") + }; + let const_instance = Instance::new_raw(c.def, c.args); + let const_value = self.eval_mir_constant(value); - let string = common::asm_const_to_str( - bx.tcx(), - span, - const_value, - bx.layout_of(value.ty()), - ); - InlineAsmOperandRef::Const { string } + let mir::ConstValue::Scalar(scalar) = const_value else { + span_bug!( + span, + "expected Scalar for promoted asm const, but got {:#?}", + const_value + ) + }; + InlineAsmOperandRef::Const { + value: common::asm_const_ptr_clean(bx.tcx(), scalar), + ty: value.ty(), + instance: Some(const_instance), + } } mir::InlineAsmOperand::SymFn { ref value } => { let const_ = self.monomorphize(value.const_); @@ -1286,7 +1299,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { args, ) .unwrap(); - InlineAsmOperandRef::SymFn { instance } + + InlineAsmOperandRef::Const { + value: Scalar::from_pointer( + bx.tcx().reserve_and_set_fn_alloc(instance, CTFE_ALLOC_SALT).into(), + bx, + ), + ty: Ty::new_fn_ptr(bx.tcx(), const_.ty().fn_sig(bx.tcx())), + instance: None, + } } else { span_bug!(span, "invalid type for asm sym (fn)"); } diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs index 4a47799b2bdc3..2ccc859ad87d3 100644 --- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs +++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs @@ -1,10 +1,11 @@ use rustc_abi::{BackendRepr, Float, Integer, Primitive, RegKind}; use rustc_hir::attrs::{InstructionSetAttr, Linkage}; +use rustc_middle::mir::interpret::{CTFE_ALLOC_SALT, Scalar}; use rustc_middle::mir::mono::{MonoItemData, Visibility}; -use rustc_middle::mir::{InlineAsmOperand, START_BLOCK}; +use rustc_middle::mir::{self, Const, InlineAsmOperand, START_BLOCK}; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::{Instance, Ty, TyCtxt, TypeVisitableExt}; -use rustc_middle::{bug, ty}; +use rustc_middle::{bug, span_bug, ty}; use rustc_span::sym; use rustc_target::callconv::{ArgAbi, FnAbi, PassMode}; use rustc_target::spec::{Arch, BinaryFormat}; @@ -62,6 +63,11 @@ fn inline_to_global_operand<'a, 'tcx, Cx: LayoutOf<'tcx, LayoutOfResult = TyAndL ) -> GlobalAsmOperandRef<'tcx> { match op { InlineAsmOperand::Const { value } => { + let Const::Unevaluated(c, _) = 
&value.const_ else { + bug!("need unevaluated const to derive symbol name") + }; + let const_instance = Instance::new_raw(c.def, c.args); + let const_value = instance .instantiate_mir_and_normalize_erasing_regions( cx.tcx(), @@ -76,15 +82,19 @@ fn inline_to_global_operand<'a, 'tcx, Cx: LayoutOf<'tcx, LayoutOfResult = TyAndL cx.typing_env(), ty::EarlyBinder::bind(value.ty()), ); + let mir::ConstValue::Scalar(scalar) = const_value else { + span_bug!( + value.span, + "expected Scalar for promoted asm const, but got {:#?}", + const_value + ) + }; - let string = common::asm_const_to_str( - cx.tcx(), - value.span, - const_value, - cx.layout_of(mono_type), - ); - - GlobalAsmOperandRef::Const { string } + GlobalAsmOperandRef::Const { + value: common::asm_const_ptr_clean(cx.tcx(), scalar), + ty: mono_type, + instance: Some(const_instance), + } } InlineAsmOperand::SymFn { value } => { let mono_type = instance.instantiate_mir_and_normalize_erasing_regions( @@ -100,7 +110,14 @@ fn inline_to_global_operand<'a, 'tcx, Cx: LayoutOf<'tcx, LayoutOfResult = TyAndL _ => bug!("asm sym is not a function"), }; - GlobalAsmOperandRef::SymFn { instance } + GlobalAsmOperandRef::Const { + value: Scalar::from_pointer( + cx.tcx().reserve_and_set_fn_alloc(instance, CTFE_ALLOC_SALT).into(), + cx, + ), + ty: Ty::new_fn_ptr(cx.tcx(), mono_type.fn_sig(cx.tcx())), + instance: None, + } } InlineAsmOperand::SymStatic { def_id } => { GlobalAsmOperandRef::SymStatic { def_id: *def_id } diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index 50f56f913a51e..ab40e0471e41c 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -134,7 +134,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { Self::alloca(bx, ptr_layout) } - pub fn len>(&self, cx: &Cx) -> V { + pub fn len>(&self, cx: &Cx) -> V { if let FieldsShape::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); diff --git a/compiler/rustc_codegen_ssa/src/traits/asm.rs b/compiler/rustc_codegen_ssa/src/traits/asm.rs index cc7a6a3f19e9e..b81066f6dd8b6 100644 --- a/compiler/rustc_codegen_ssa/src/traits/asm.rs +++ b/compiler/rustc_codegen_ssa/src/traits/asm.rs @@ -1,6 +1,7 @@ use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_hir::def_id::DefId; -use rustc_middle::ty::Instance; +use rustc_middle::mir::interpret::Scalar; +use rustc_middle::ty::{Instance, Ty}; use rustc_span::Span; use rustc_target::asm::InlineAsmRegOrRegClass; @@ -26,10 +27,13 @@ pub enum InlineAsmOperandRef<'tcx, B: BackendTypes + ?Sized> { out_place: Option>, }, Const { - string: String, - }, - SymFn { - instance: Instance<'tcx>, + value: Scalar, + /// Type of the constant. This is needed to extract width and signedness. + ty: Ty<'tcx>, + /// Instance to the const that produces this operand. + /// + /// This is used to be able to generate unique name for promoted statics. + instance: Option>, }, SymStatic { def_id: DefId, @@ -41,9 +45,18 @@ pub enum InlineAsmOperandRef<'tcx, B: BackendTypes + ?Sized> { #[derive(Debug)] pub enum GlobalAsmOperandRef<'tcx> { - Const { string: String }, - SymFn { instance: Instance<'tcx> }, - SymStatic { def_id: DefId }, + Const { + value: Scalar, + /// Type of the constant. This is needed to extract width and signedness. + ty: Ty<'tcx>, + /// Instance to the const that produces this operand. + /// + /// This is used to be able to generate unique name for promoted statics. 
+ instance: Option>, + }, + SymStatic { + def_id: DefId, + }, } pub trait AsmBuilderMethods<'tcx>: BackendTypes { diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs index d83a04d814be3..40e62012a56e8 100644 --- a/compiler/rustc_codegen_ssa/src/traits/consts.rs +++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs @@ -1,9 +1,10 @@ use rustc_abi as abi; -use rustc_middle::mir::interpret::{ConstAllocation, Scalar}; +use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; +use rustc_middle::ty::Instance; use super::BackendTypes; -pub trait ConstCodegenMethods: BackendTypes { +pub trait ConstCodegenMethods<'tcx>: BackendTypes { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; /// Generate an uninitialized value (matching uninitialized memory in MIR). @@ -39,6 +40,19 @@ pub trait ConstCodegenMethods: BackendTypes { fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value; + /// Turn a `GlobalAlloc` into a backend global, return the value and instance that is used to + /// generate the symbol name, if any. + /// + /// If the `GlobalAlloc` should not be mapped to a global, but absolute address should be used, + /// an integer is returned as `Err` instead. + /// + /// If the caller needs to guarantee a symbol name, it can provide a name hint. The name will be + /// used to generate a new symbol if there isn't one already (i.e. the case of fn/static). + fn alloc_to_backend( + &self, + global_alloc: GlobalAlloc<'tcx>, + name_hint: Option>, + ) -> Result<(Self::Value, Option>), u64>; fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value; fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value; diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs index 6d1ac717c0b8f..69f0732f84c1c 100644 --- a/compiler/rustc_codegen_ssa/src/traits/mod.rs +++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs @@ -55,7 +55,7 @@ pub trait CodegenObject = Copy + fmt::Debug; pub trait CodegenMethods<'tcx> = LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>> + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>> + TypeCodegenMethods<'tcx> - + ConstCodegenMethods + + ConstCodegenMethods<'tcx> + StaticCodegenMethods + DebugInfoCodegenMethods<'tcx> + AsmCodegenMethods<'tcx> diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs index fe053935f9e64..7d555fc480424 100644 --- a/compiler/rustc_feature/src/unstable.rs +++ b/compiler/rustc_feature/src/unstable.rs @@ -384,6 +384,8 @@ declare_features! ( (unstable, arbitrary_self_types, "1.23.0", Some(44874)), /// Allows inherent and trait methods with arbitrary self types that are raw pointers. (unstable, arbitrary_self_types_pointers, "1.83.0", Some(44874)), + /// Allows using `const` operands with pointer in inline assembly. + (unstable, asm_const_ptr, "CURRENT_RUSTC_VERSION", Some(128464)), /// Enables experimental inline assembly support for additional architectures. (unstable, asm_experimental_arch, "1.58.0", Some(93335)), /// Enables experimental register support in inline assembly. 
diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl index 325be43a0065c..3f907803babea 100644 --- a/compiler/rustc_hir_typeck/messages.ftl +++ b/compiler/rustc_hir_typeck/messages.ftl @@ -15,6 +15,10 @@ hir_typeck_arg_mismatch_indeterminate = argument type mismatch was detected, but .note = we would appreciate a bug report: https://github.com/rust-lang/rust/issues/new hir_typeck_as_deref_suggestion = consider using `as_deref` here + +hir_typeck_asm_const_ptr_unstable = + using pointers in asm `const` operand is experimental + hir_typeck_base_expression_double_dot = base expression required after `..` hir_typeck_base_expression_double_dot_add_expr = add a base expression here hir_typeck_base_expression_double_dot_enable_default_field_values = diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs index 620002915fa8d..cdf7965048cbe 100644 --- a/compiler/rustc_hir_typeck/src/errors.rs +++ b/compiler/rustc_hir_typeck/src/errors.rs @@ -19,6 +19,13 @@ use rustc_span::{Ident, Span, Symbol}; use crate::{FnCtxt, fluent_generated as fluent}; +#[derive(Diagnostic)] +#[diag(hir_typeck_asm_const_ptr_unstable)] +pub(crate) struct AsmConstPtrUnstable { + #[primary_span] + pub span: Span, +} + #[derive(Diagnostic)] #[diag(hir_typeck_base_expression_double_dot, code = E0797)] pub(crate) struct BaseExpressionDoubleDot { diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs index 48082560bb83b..00b3cab93df94 100644 --- a/compiler/rustc_hir_typeck/src/expr.rs +++ b/compiler/rustc_hir_typeck/src/expr.rs @@ -3698,7 +3698,40 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } hir::InlineAsmOperand::Const { ref anon_const } => { - self.check_expr_const_block(anon_const, Expectation::NoExpectation); + // This is mostly similar to type-checking of inline const expressions `const { ... }`, however + // asm const has special coercion rules (per RFC 3848) where function items and closures are coerced to + // function pointers (while pointers and integer remain as-is). + let body = self.tcx.hir_body(anon_const.body); + + let fcx = FnCtxt::new(self, self.param_env, anon_const.def_id); + let ty = fcx.check_expr(body.value); + let target_ty = match self.structurally_resolve_type(body.value.span, ty).kind() + { + ty::FnDef(..) 
=> { + let fn_sig = ty.fn_sig(self.tcx()); + Ty::new_fn_ptr(self.tcx(), fn_sig) + } + ty::Closure(_, args) => { + let closure_sig = args.as_closure().sig(); + let fn_sig = + self.tcx().signature_unclosure(closure_sig, hir::Safety::Safe); + Ty::new_fn_ptr(self.tcx(), fn_sig) + } + _ => ty, + }; + + if let Err(diag) = + self.demand_coerce_diag(&body.value, ty, target_ty, None, AllowTwoPhase::No) + { + diag.emit(); + } + + fcx.require_type_is_sized( + target_ty, + body.value.span, + ObligationCauseCode::SizedConstOrStatic, + ); + fcx.write_ty(anon_const.hir_id, target_ty); } hir::InlineAsmOperand::SymFn { expr } => { self.check_expr(expr); diff --git a/compiler/rustc_hir_typeck/src/inline_asm.rs b/compiler/rustc_hir_typeck/src/inline_asm.rs index 6626c3edb5466..e97f0d5083c91 100644 --- a/compiler/rustc_hir_typeck/src/inline_asm.rs +++ b/compiler/rustc_hir_typeck/src/inline_asm.rs @@ -14,7 +14,7 @@ use rustc_target::asm::{ use rustc_trait_selection::infer::InferCtxtExt; use crate::FnCtxt; -use crate::errors::RegisterTypeUnstable; +use crate::errors::{AsmConstPtrUnstable, RegisterTypeUnstable}; pub(crate) struct InlineAsmCtxt<'a, 'tcx> { target_features: &'tcx FxIndexSet, @@ -522,7 +522,36 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { match ty.kind() { ty::Error(_) => {} _ if ty.is_integral() => {} + ty::FnPtr(..) => { + if !self.tcx().features().asm_const_ptr() { + self.tcx() + .sess + .create_feature_err( + AsmConstPtrUnstable { span: op_sp }, + sym::asm_const_ptr, + ) + .emit(); + } + } + ty::RawPtr(pointee, _) | ty::Ref(_, pointee, _) + if self.is_thin_ptr_ty(op_sp, *pointee) => + { + if !self.tcx().features().asm_const_ptr() { + self.tcx() + .sess + .create_feature_err( + AsmConstPtrUnstable { span: op_sp }, + sym::asm_const_ptr, + ) + .emit(); + } + } _ => { + let const_possible_ty = if !self.tcx().features().asm_const_ptr() { + "integer" + } else { + "integer or thin pointer" + }; self.fcx .dcx() .struct_span_err(op_sp, "invalid type for `const` operand") @@ -530,7 +559,9 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { self.tcx().def_span(anon_const.def_id), format!("is {} `{}`", ty.kind().article(), ty), ) - .with_help("`const` operands must be of an integer type") + .with_help(format!( + "`const` operands must be of an {const_possible_ty} type" + )) .emit(); } } diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index c7ced84a527f7..427889e2df4d1 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -122,7 +122,7 @@ impl<'tcx> MonoItem<'tcx> { MonoItem::Fn(instance) => tcx.symbol_name(instance), MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)), MonoItem::GlobalAsm(item_id) => { - SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.owner_id)) + tcx.symbol_name(Instance::mono(tcx, item_id.owner_id.to_def_id())) } } } diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index cd699436e0120..2f0580a8da466 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -508,10 +508,18 @@ fn collect_items_rec<'tcx>( if let hir::ItemKind::GlobalAsm { asm, .. } = item.kind { for (op, op_sp) in asm.operands { match *op { - hir::InlineAsmOperand::Const { .. } => { - // Only constants which resolve to a plain integer - // are supported. Therefore the value should not - // depend on any other items. 
+ hir::InlineAsmOperand::Const { anon_const } => { + match tcx.const_eval_poly(anon_const.def_id.to_def_id()) { + Ok(val) => { + collect_const_value(tcx, val, &mut used_items); + } + Err(ErrorHandled::TooGeneric(..)) => { + span_bug!(*op_sp, "asm const cannot be resolved; too generic") + } + Err(ErrorHandled::Reported(..)) => { + continue; + } + } } hir::InlineAsmOperand::SymFn { expr } => { let fn_ty = tcx.typeck(item_id.owner_id).expr_ty(expr); diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 8e464b55d6c23..80d6a6bb63f0d 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -481,6 +481,7 @@ symbols! { asm, asm_cfg, asm_const, + asm_const_ptr, asm_experimental_arch, asm_experimental_reg, asm_goto, diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs index ee2621af84280..50ccbbfb97105 100644 --- a/compiler/rustc_symbol_mangling/src/legacy.rs +++ b/compiler/rustc_symbol_mangling/src/legacy.rs @@ -36,6 +36,11 @@ pub(super) fn mangle<'tcx>( debug!(?instance_ty); break; } + DefPathData::GlobalAsm => { + // `global_asm!` doesn't have a type. + instance_ty = tcx.types.unit; + break; + } _ => { // if we're making a symbol for something, there ought // to be a value or type-def or something in there diff --git a/tests/assembly-llvm/asm/global_asm.rs b/tests/assembly-llvm/asm/global_asm.rs index 8a4bf98c7450b..f7bf13281dbde 100644 --- a/tests/assembly-llvm/asm/global_asm.rs +++ b/tests/assembly-llvm/asm/global_asm.rs @@ -5,6 +5,7 @@ //@ compile-flags: -C symbol-mangling-version=v0 #![crate_type = "rlib"] +#![feature(asm_const_ptr)] use std::arch::global_asm; @@ -26,6 +27,10 @@ global_asm!("call {}", sym my_func); global_asm!("lea rax, [rip + {}]", sym MY_STATIC); // CHECK: call _RNvC[[CRATE_IDENT:[a-zA-Z0-9]{12}]]_10global_asm6foobar global_asm!("call {}", sym foobar); +// CHECK: lea rax, [rip + _RNKNaC[[CRATE_IDENT]]_10global_asms4_00B3_] +global_asm!("lea rax, [rip + {}]", const &1); +// CHECK: lea rax, [rip + _RNKNaC[[CRATE_IDENT]]_10global_asms5_00B3_+4] +global_asm!("lea rax, [rip + {}]", const &[1; 2][1]); // CHECK: _RNvC[[CRATE_IDENT]]_10global_asm6foobar: fn foobar() { loop {} diff --git a/tests/assembly-llvm/asm/x86-types.rs b/tests/assembly-llvm/asm/x86-types.rs index 9fe7ea00bd939..f703693f21c50 100644 --- a/tests/assembly-llvm/asm/x86-types.rs +++ b/tests/assembly-llvm/asm/x86-types.rs @@ -9,7 +9,7 @@ //@ compile-flags: -C target-feature=+avx512bw //@ compile-flags: -Zmerge-functions=disabled -#![feature(no_core, repr_simd, f16, f128)] +#![feature(no_core, repr_simd, f16, f128, asm_const_ptr)] #![crate_type = "rlib"] #![no_core] #![allow(asm_sub_register, non_camel_case_types)] @@ -101,6 +101,15 @@ pub unsafe fn sym_static() { asm!("mov al, byte ptr [{}]", sym extern_static); } +// CHECK-LABEL: const_ptr: +// CHECK: #APP +// CHECK: mov al, byte ptr [{{.*}}anon{{.*}}] +// CHECK: #NO_APP +#[no_mangle] +pub unsafe fn const_ptr() { + asm!("mov al, byte ptr [{}]", const &1u8); +} + macro_rules! 
check { ($func:ident $ty:ident $class:ident $mov:literal) => { #[no_mangle] diff --git a/tests/ui/asm/const-refs-to-static.rs b/tests/ui/asm/const-refs-to-static.rs index ce2c5b3246ec8..8058d70550aba 100644 --- a/tests/ui/asm/const-refs-to-static.rs +++ b/tests/ui/asm/const-refs-to-static.rs @@ -1,19 +1,20 @@ //@ needs-asm-support //@ ignore-nvptx64 //@ ignore-spirv +//@ build-pass + +#![feature(asm_const_ptr)] use std::arch::{asm, global_asm}; use std::ptr::addr_of; static FOO: u8 = 42; -global_asm!("{}", const addr_of!(FOO)); -//~^ ERROR invalid type for `const` operand +global_asm!("/* {} */", const addr_of!(FOO)); #[no_mangle] fn inline() { - unsafe { asm!("{}", const addr_of!(FOO)) }; - //~^ ERROR invalid type for `const` operand + unsafe { asm!("/* {} */", const addr_of!(FOO)) }; } fn main() {} diff --git a/tests/ui/asm/const-refs-to-static.stderr b/tests/ui/asm/const-refs-to-static.stderr deleted file mode 100644 index 10e1ca5bd6068..0000000000000 --- a/tests/ui/asm/const-refs-to-static.stderr +++ /dev/null @@ -1,22 +0,0 @@ -error: invalid type for `const` operand - --> $DIR/const-refs-to-static.rs:10:19 - | -LL | global_asm!("{}", const addr_of!(FOO)); - | ^^^^^^------------- - | | - | is a `*const u8` - | - = help: `const` operands must be of an integer type - -error: invalid type for `const` operand - --> $DIR/const-refs-to-static.rs:15:25 - | -LL | unsafe { asm!("{}", const addr_of!(FOO)) }; - | ^^^^^^------------- - | | - | is a `*const u8` - | - = help: `const` operands must be of an integer type - -error: aborting due to 2 previous errors - diff --git a/tests/ui/asm/invalid-const-operand.rs b/tests/ui/asm/invalid-const-operand.rs index bbf4001752a4b..2c40db8f36795 100644 --- a/tests/ui/asm/invalid-const-operand.rs +++ b/tests/ui/asm/invalid-const-operand.rs @@ -2,6 +2,8 @@ //@ ignore-nvptx64 //@ ignore-spirv +#![feature(asm_const_ptr)] + use std::arch::{asm, global_asm}; // Const operands must be integers and must be constants. @@ -12,11 +14,10 @@ global_asm!("{}", const 0i128); global_asm!("{}", const 0f32); //~^ ERROR invalid type for `const` operand global_asm!("{}", const 0 as *mut u8); -//~^ ERROR invalid type for `const` operand fn test1() { unsafe { - // Const operands must be integers and must be constants. 
+ // Const operands must be integers or thin pointers asm!("{}", const 0); asm!("{}", const 0i32); @@ -24,9 +25,14 @@ fn test1() { asm!("{}", const 0f32); //~^ ERROR invalid type for `const` operand asm!("{}", const 0 as *mut u8); - //~^ ERROR invalid type for `const` operand asm!("{}", const &0); + asm!("{}", const b"Foo".as_slice()); //~^ ERROR invalid type for `const` operand + + asm!("{}", const test1 as fn()); + asm!("{}", const test1); + asm!("{}", const (|| {}) as fn()); + asm!("{}", const || {}); } } diff --git a/tests/ui/asm/invalid-const-operand.stderr b/tests/ui/asm/invalid-const-operand.stderr index 01aa843c6fb19..82cf042697b55 100644 --- a/tests/ui/asm/invalid-const-operand.stderr +++ b/tests/ui/asm/invalid-const-operand.stderr @@ -1,5 +1,5 @@ error[E0435]: attempt to use a non-constant value in a constant - --> $DIR/invalid-const-operand.rs:44:26 + --> $DIR/invalid-const-operand.rs:50:26 | LL | asm!("{}", const x); | ^ non-constant value @@ -11,7 +11,7 @@ LL + const x: /* Type */ = 0; | error[E0435]: attempt to use a non-constant value in a constant - --> $DIR/invalid-const-operand.rs:47:36 + --> $DIR/invalid-const-operand.rs:53:36 | LL | asm!("{}", const const_foo(x)); | ^ non-constant value @@ -23,7 +23,7 @@ LL + const x: /* Type */ = 0; | error[E0435]: attempt to use a non-constant value in a constant - --> $DIR/invalid-const-operand.rs:50:36 + --> $DIR/invalid-const-operand.rs:56:36 | LL | asm!("{}", const const_bar(x)); | ^ non-constant value @@ -35,55 +35,35 @@ LL + const x: /* Type */ = 0; | error: invalid type for `const` operand - --> $DIR/invalid-const-operand.rs:12:19 + --> $DIR/invalid-const-operand.rs:14:19 | LL | global_asm!("{}", const 0f32); | ^^^^^^---- | | | is an `f32` | - = help: `const` operands must be of an integer type - -error: invalid type for `const` operand - --> $DIR/invalid-const-operand.rs:14:19 - | -LL | global_asm!("{}", const 0 as *mut u8); - | ^^^^^^------------ - | | - | is a `*mut u8` - | - = help: `const` operands must be of an integer type + = help: `const` operands must be of an integer or thin pointer type error: invalid type for `const` operand - --> $DIR/invalid-const-operand.rs:24:20 + --> $DIR/invalid-const-operand.rs:25:20 | LL | asm!("{}", const 0f32); | ^^^^^^---- | | | is an `f32` | - = help: `const` operands must be of an integer type - -error: invalid type for `const` operand - --> $DIR/invalid-const-operand.rs:26:20 - | -LL | asm!("{}", const 0 as *mut u8); - | ^^^^^^------------ - | | - | is a `*mut u8` - | - = help: `const` operands must be of an integer type + = help: `const` operands must be of an integer or thin pointer type error: invalid type for `const` operand - --> $DIR/invalid-const-operand.rs:28:20 + --> $DIR/invalid-const-operand.rs:29:20 | -LL | asm!("{}", const &0); - | ^^^^^^-- +LL | asm!("{}", const b"Foo".as_slice()); + | ^^^^^^----------------- | | - | is a `&i32` + | is a `&[u8]` | - = help: `const` operands must be of an integer type + = help: `const` operands must be of an integer or thin pointer type -error: aborting due to 8 previous errors +error: aborting due to 6 previous errors For more information about this error, try `rustc --explain E0435`. 
diff --git a/tests/ui/feature-gates/feature-gate-asm_const_ptr.rs b/tests/ui/feature-gates/feature-gate-asm_const_ptr.rs new file mode 100644 index 0000000000000..cdcb5995a0f08 --- /dev/null +++ b/tests/ui/feature-gates/feature-gate-asm_const_ptr.rs @@ -0,0 +1,22 @@ +//@ only-x86_64 + +use std::arch::{asm, global_asm, naked_asm}; + +global_asm!("/* {} */", const &0); +//~^ ERROR using pointers in asm `const` operand is experimental + +#[unsafe(naked)] +extern "C" fn naked() { + unsafe { + naked_asm!("ret /* {} */", const &0); + //~^ ERROR using pointers in asm `const` operand is experimental + } +} + +fn main() { + naked(); + unsafe { + asm!("/* {} */", const &0); + //~^ ERROR using pointers in asm `const` operand is experimental + } +} diff --git a/tests/ui/feature-gates/feature-gate-asm_const_ptr.stderr b/tests/ui/feature-gates/feature-gate-asm_const_ptr.stderr new file mode 100644 index 0000000000000..a804d8fe44be5 --- /dev/null +++ b/tests/ui/feature-gates/feature-gate-asm_const_ptr.stderr @@ -0,0 +1,33 @@ +error[E0658]: using pointers in asm `const` operand is experimental + --> $DIR/feature-gate-asm_const_ptr.rs:5:25 + | +LL | global_asm!("/* {} */", const &0); + | ^^^^^^^^ + | + = note: see issue #128464 for more information + = help: add `#![feature(asm_const_ptr)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: using pointers in asm `const` operand is experimental + --> $DIR/feature-gate-asm_const_ptr.rs:11:36 + | +LL | naked_asm!("ret /* {} */", const &0); + | ^^^^^^^^ + | + = note: see issue #128464 for more information + = help: add `#![feature(asm_const_ptr)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error[E0658]: using pointers in asm `const` operand is experimental + --> $DIR/feature-gate-asm_const_ptr.rs:19:26 + | +LL | asm!("/* {} */", const &0); + | ^^^^^^^^ + | + = note: see issue #128464 for more information + = help: add `#![feature(asm_const_ptr)]` to the crate attributes to enable + = note: this compiler was built on YYYY-MM-DD; consider upgrading it if it is out of date + +error: aborting due to 3 previous errors + +For more information about this error, try `rustc --explain E0658`.
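---

For reference, a minimal sketch of what this change permits, distilled from the tests touched above (tests/ui/asm/const-refs-to-static.rs, tests/ui/asm/invalid-const-operand.rs, tests/assembly-llvm/asm/x86-types.rs). It assumes a nightly toolchain with `#![feature(asm_const_ptr)]` and an asm-capable target; the `/* {} */` templates only substitute the operand into an assembly comment, and `my_func` is an illustrative name, not part of this patch.

    #![feature(asm_const_ptr)]

    use std::arch::{asm, global_asm};
    use std::ptr::addr_of;

    static FOO: u8 = 42;

    // A pointer `const` operand is now rendered as a symbol plus an optional
    // offset instead of being rejected as "invalid type for `const` operand".
    global_asm!("/* {} */", const addr_of!(FOO));

    fn my_func() {}

    fn main() {
        unsafe {
            // Thin pointers and references are accepted as `const` operands...
            asm!("/* {} */", const addr_of!(FOO));
            asm!("/* {} */", const &1u8);
            // ...and function items coerce to function pointers (per RFC 3848).
            asm!("/* {} */", const my_func as fn());
        }
    }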