From c9c04f0e0805906fc083a519180dca3932d9ad98 Mon Sep 17 00:00:00 2001
From: Flakebi
Date: Thu, 4 Dec 2025 09:27:41 +0100
Subject: [PATCH] Add intrinsic for launch-sized workgroup memory on GPUs

Workgroup memory is a memory region that is shared between all threads in a
workgroup on GPUs. Workgroup memory can be allocated statically, or after
compilation when launching a gpu-kernel. The intrinsic added here returns the
pointer to the memory that is allocated at launch-time.

# Interface

With this change, workgroup memory can be accessed in Rust by calling the new
`gpu_launch_sized_workgroup_mem<T>() -> *mut T` intrinsic. It returns the
pointer to workgroup memory, guaranteeing that it is aligned to at least the
alignment of `T`. The pointer is dereferenceable for the size specified when
launching the current gpu-kernel (which may be the size of `T` but can also
be larger, smaller, or zero). All calls to this intrinsic return a pointer to
the same address. See the intrinsic documentation for more details.
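For illustration, a kernel body could use the intrinsic roughly as in the
following sketch. This is only a sketch: how a kernel entry point is declared
is target-specific and omitted, and `kernel_body`, `tid`, and `len` are
hypothetical names; the host is assumed to have allocated at least
`len * size_of::<f32>()` bytes of workgroup memory at launch.

```rust
#![feature(core_intrinsics, gpu_launch_sized_workgroup_mem)]

// Hypothetical kernel body; `tid` is this thread's index in its workgroup.
fn kernel_body(tid: usize, len: usize) {
    // Every thread in the workgroup observes the same base pointer,
    // aligned to at least `align_of::<f32>()`.
    let mem: *mut f32 = core::intrinsics::gpu_launch_sized_workgroup_mem::<f32>();
    if tid < len {
        // SAFETY: this element lies within the launch-time allocation and
        // no other thread accesses it.
        unsafe { mem.add(tid).write(0.0) };
    }
}
```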
## Alternative Interfaces

It was also considered to expose dynamic workgroup memory as extern static
variables in Rust, mirroring how it is represented in LLVM IR. However,
because the pointer is not guaranteed to be dereferenceable (that depends on
the size allocated at runtime), such a static would have to be zero-sized,
which makes global variables a bad fit.

# Implementation Details

Workgroup memory on amdgpu and nvptx lives in address space 3. Workgroup
memory from a launch is implemented by creating an external global variable
in address space 3. The global is declared with size 0, as the actual size is
only known at runtime. It is defined behavior in LLVM to access an external
global outside its declared size.

There is no similar intrinsic to get the allocated size of launch-sized
workgroup memory on amdgpu and nvptx, so users have to pass it out-of-band or
rely on target-specific ways for now.
---
 compiler/rustc_abi/src/lib.rs                 |  3 ++
 compiler/rustc_codegen_llvm/src/declare.rs    | 23 ++++++++++
 compiler/rustc_codegen_llvm/src/intrinsic.rs  | 44 ++++++++++++++++++-
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs   |  7 +++
 .../rustc_codegen_ssa/src/mir/intrinsic.rs    |  1 +
 .../rustc_hir_analysis/src/check/intrinsic.rs |  2 +
 .../rustc_llvm/llvm-wrapper/RustWrapper.cpp   | 21 ++++++---
 compiler/rustc_span/src/symbol.rs             |  1 +
 library/core/src/intrinsics/mod.rs            | 39 ++++++++++++++++
 src/tools/tidy/src/style.rs                   |  4 ++
 .../gpu-launch-sized-workgroup-memory.rs      | 31 +++++++++++++
 11 files changed, 169 insertions(+), 7 deletions(-)
 create mode 100644 tests/codegen-llvm/gpu-launch-sized-workgroup-memory.rs

diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index cd85efb2753ae..6e090a61c7726 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -1719,6 +1719,9 @@ pub struct AddressSpace(pub u32);
 impl AddressSpace {
     /// LLVM's `0` address space.
     pub const ZERO: Self = AddressSpace(0);
+    /// The address space for workgroup memory on nvptx and amdgpu.
+    /// See e.g. the `gpu_launch_sized_workgroup_mem` intrinsic for details.
+    pub const GPU_WORKGROUP: Self = AddressSpace(3);
 }
 
 /// The way we represent values to the backend
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index 8f69f176138cf..8c5fcd36fa69b 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -14,6 +14,7 @@
 use std::borrow::Borrow;
 
 use itertools::Itertools;
+use rustc_abi::AddressSpace;
 use rustc_codegen_ssa::traits::TypeMembershipCodegenMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_middle::ty::{Instance, Ty};
@@ -97,6 +98,28 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
             )
         }
     }
+
+    /// Declare a global value in a specific address space.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// return its Value instead.
+    pub(crate) fn declare_global_in_addrspace(
+        &self,
+        name: &str,
+        ty: &'ll Type,
+        addr_space: AddressSpace,
+    ) -> &'ll Value {
+        debug!("declare_global_in_addrspace(name={name:?}, addrspace={addr_space:?})");
+        unsafe {
+            llvm::LLVMRustGetOrInsertGlobalInAddrspace(
+                (**self).borrow().llmod,
+                name.as_c_char_ptr(),
+                name.len(),
+                ty,
+                addr_space.0,
+            )
+        }
+    }
 }
 
 impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 33541f7b695f8..81162ebe709f9 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1,7 +1,9 @@
 use std::assert_matches::assert_matches;
 use std::cmp::Ordering;
 
-use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size};
+use rustc_abi::{
+    AddressSpace, Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size,
+};
 use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::codegen_attrs::autodiff_attrs;
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
@@ -20,7 +22,7 @@ use rustc_session::config::CrateType;
 use rustc_span::{Span, Symbol, sym};
 use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate};
 use rustc_target::callconv::PassMode;
-use rustc_target::spec::Os;
+use rustc_target::spec::{Arch, Os};
 use tracing::debug;
 
 use crate::abi::FnAbiLlvmExt;
@@ -553,6 +555,44 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
                 return Ok(());
             }
+            sym::gpu_launch_sized_workgroup_mem => {
+                // The name of the global variable is not relevant; the important properties are:
+                // 1. The global is in the address space for workgroup memory.
+                // 2. It is an extern global.
+                // All instances of extern addrspace(gpu_workgroup) globals are merged in the LLVM backend.
+                // Generate an unnamed global per intrinsic call, so that different kernels can have
+                // different minimum alignments.
+                // See https://docs.nvidia.com/cuda/cuda-c-programming-guide/#shared
+                // FIXME: Work around an nvptx backend issue where extern globals must have a name
+                let name = if tcx.sess.target.arch == Arch::Nvptx64 {
+                    "gpu_launch_sized_workgroup_mem"
+                } else {
+                    ""
+                };
+                let global = self.declare_global_in_addrspace(
+                    name,
+                    self.type_array(self.type_i8(), 0),
+                    AddressSpace::GPU_WORKGROUP,
+                );
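+                // `result` is the codegen'd return place of type `*mut T`; its pointee
+                // type supplies the minimum alignment requested from the GPU runtime below.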
+                let ty::RawPtr(inner_ty, _) = result.layout.ty.kind() else { unreachable!() };
+                // The alignment of the global is used to specify the *minimum* alignment that
+                // must be obeyed by the GPU runtime.
+                // When multiple of these global variables are used by a kernel, the maximum alignment is taken.
+                // See https://github.com/llvm/llvm-project/blob/a271d07488a85ce677674bbe8101b10efff58c95/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp#L821
+                let alignment = self.align_of(*inner_ty).bytes() as u32;
+                unsafe {
+                    // FIXME: Work around the above issue by taking the maximum alignment if the global already existed
+                    if tcx.sess.target.arch == Arch::Nvptx64 {
+                        if alignment > llvm::LLVMGetAlignment(global) {
+                            llvm::LLVMSetAlignment(global, alignment);
+                        }
+                    } else {
+                        llvm::LLVMSetAlignment(global, alignment);
+                    }
+                }
+                self.cx().const_pointercast(global, self.type_ptr())
+            }
+
             _ if name.as_str().starts_with("simd_") => {
                 // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                 // This gives them the expected layout of a regular #[repr(simd)] vector.
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 53b9a2bda8944..28bbf4605532b 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2017,6 +2017,13 @@ unsafe extern "C" {
         NameLen: size_t,
         T: &'a Type,
     ) -> &'a Value;
+    pub(crate) fn LLVMRustGetOrInsertGlobalInAddrspace<'a>(
+        M: &'a Module,
+        Name: *const c_char,
+        NameLen: size_t,
+        T: &'a Type,
+        AddressSpace: c_uint,
+    ) -> &'a Value;
     pub(crate) fn LLVMRustGetNamedValue(
         M: &Module,
         Name: *const c_char,
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index aeb7401182347..2110b0a9e9b7a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -111,6 +111,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::abort
             | sym::unreachable
             | sym::cold_path
+            | sym::gpu_launch_sized_workgroup_mem
             | sym::breakpoint
             | sym::assert_zero_valid
             | sym::assert_mem_uninitialized_valid
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 676c9a980afff..d97e9060e5eb4 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -132,6 +132,7 @@ fn intrinsic_operation_unsafety(tcx: TyCtxt<'_>, intrinsic_id: LocalDefId) -> hi
         | sym::forget
         | sym::frem_algebraic
         | sym::fsub_algebraic
+        | sym::gpu_launch_sized_workgroup_mem
         | sym::is_val_statically_known
         | sym::log2f16
         | sym::log2f32
@@ -293,6 +294,7 @@ pub(crate) fn check_intrinsic_type(
         sym::offset_of => (1, 0, vec![tcx.types.u32, tcx.types.u32], tcx.types.usize),
         sym::rustc_peek => (1, 0, vec![param(0)], param(0)),
         sym::caller_location => (0, 0, vec![], tcx.caller_location_ty()),
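+        // One generic type parameter, no value arguments; returns `*mut T`.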
+        sym::gpu_launch_sized_workgroup_mem => (1, 0, vec![], Ty::new_mut_ptr(tcx, param(0))),
         sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
             (1, 0, vec![], tcx.types.unit)
         }
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index dda06e9b2bf68..26dc665954229 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -261,10 +261,10 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M,
           .getCallee());
 }
 
-extern "C" LLVMValueRef LLVMRustGetOrInsertGlobal(LLVMModuleRef M,
-                                                  const char *Name,
-                                                  size_t NameLen,
-                                                  LLVMTypeRef Ty) {
+extern "C" LLVMValueRef
+LLVMRustGetOrInsertGlobalInAddrspace(LLVMModuleRef M, const char *Name,
+                                     size_t NameLen, LLVMTypeRef Ty,
+                                     unsigned AddressSpace) {
   Module *Mod = unwrap(M);
   auto NameRef =
       StringRef(Name, NameLen);
@@ -275,10 +275,21 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertGlobal(LLVMModuleRef M,
   GlobalVariable *GV = Mod->getGlobalVariable(NameRef, true);
   if (!GV)
     GV = new GlobalVariable(*Mod, unwrap(Ty), false,
-                            GlobalValue::ExternalLinkage, nullptr, NameRef);
+                            GlobalValue::ExternalLinkage, nullptr, NameRef,
+                            nullptr, GlobalValue::NotThreadLocal, AddressSpace);
   return wrap(GV);
 }
 
+extern "C" LLVMValueRef LLVMRustGetOrInsertGlobal(LLVMModuleRef M,
+                                                  const char *Name,
+                                                  size_t NameLen,
+                                                  LLVMTypeRef Ty) {
+  Module *Mod = unwrap(M);
+  unsigned AddressSpace = Mod->getDataLayout().getDefaultGlobalsAddressSpace();
+  return LLVMRustGetOrInsertGlobalInAddrspace(M, Name, NameLen, Ty,
+                                              AddressSpace);
+}
+
 // Must match the layout of `rustc_codegen_llvm::llvm::ffi::AttributeKind`.
 enum class LLVMRustAttributeKind {
   AlwaysInline = 0,
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index f2b13dad1fd90..5dc47e2ee052c 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -1152,6 +1152,7 @@ symbols! {
         global_asm,
         global_registration,
         globs,
+        gpu_launch_sized_workgroup_mem,
         gt,
         guard_patterns,
         half_open_range_patterns,
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index 2115c5c9a85d8..13c94244c66df 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -3436,6 +3436,45 @@ pub(crate) const fn miri_promise_symbolic_alignment(ptr: *const (), align: usize
     )
 }
 
+/// Returns the pointer to workgroup memory allocated at launch-time on GPUs.
+///
+/// Workgroup memory is a memory region that is shared between all threads in
+/// the same workgroup. It is faster to access than other memory, but pointers
+/// do not work outside the workgroup where they were obtained.
+/// Workgroup memory can be allocated statically or after compilation, when
+/// launching a gpu-kernel. `gpu_launch_sized_workgroup_mem` returns the
+/// pointer to the memory that is allocated at launch-time.
+/// The size of this memory can differ between launches of a gpu-kernel,
+/// depending on what is specified at launch-time.
+/// However, the alignment is fixed by the kernel itself, at compile-time.
+///
+/// The returned pointer is the start of the workgroup memory region that is
+/// allocated at launch-time.
+/// All calls to `gpu_launch_sized_workgroup_mem` in a workgroup, independent
+/// of the generic type, return the same address, so they alias the same memory.
+/// The returned pointer is aligned to at least the alignment of `T`.
+///
+/// # Safety
+///
+/// The pointer is safe to dereference from the start (the returned pointer) up to the
+/// size of workgroup memory that was specified when launching the current gpu-kernel.
+///
+/// The user must take care of synchronizing access to workgroup memory
+/// between threads in a workgroup. The usual data race requirements apply.
+///
+/// # Other APIs
+///
+/// CUDA and HIP call this dynamic shared memory, shared between threads in a block.
+/// OpenCL and SYCL call this local memory, shared between threads in a work-group.
+/// GLSL calls this shared memory, shared between invocations in a work group.
+/// DirectX calls this groupshared memory, shared between threads in a thread-group.
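+///
+/// # Examples
+///
+/// All calls return the same base address, so the launch-time allocation can
+/// be viewed through differently-typed pointers (a sketch; it requires a
+/// launch that actually allocates workgroup memory):
+///
+/// ```ignore (requires a GPU target and launch-time allocation)
+/// let a: *mut u8 = gpu_launch_sized_workgroup_mem::<u8>();
+/// // This second call also raises the minimum alignment of the region
+/// // to `align_of::<u64>()`.
+/// let b: *mut u64 = gpu_launch_sized_workgroup_mem::<u64>();
+/// assert_eq!(a as usize, b as usize);
+/// ```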
+#[must_use = "returns a pointer that does nothing unless used"]
+#[rustc_intrinsic]
+#[rustc_nounwind]
+#[unstable(feature = "gpu_launch_sized_workgroup_mem", issue = "135513")]
+#[cfg(any(target_arch = "amdgpu", target_arch = "nvptx64"))]
+pub fn gpu_launch_sized_workgroup_mem<T>() -> *mut T;
+
 /// Copies the current location of arglist `src` to the arglist `dst`.
 ///
 /// # Safety
diff --git a/src/tools/tidy/src/style.rs b/src/tools/tidy/src/style.rs
index 111fe89e7eb0a..c80fd5820bdc5 100644
--- a/src/tools/tidy/src/style.rs
+++ b/src/tools/tidy/src/style.rs
@@ -222,6 +222,10 @@ fn should_ignore(line: &str) -> bool {
         || static_regex!(
             "\\s*//@ \\!?(count|files|has|has-dir|hasraw|matches|matchesraw|snapshot)\\s.*"
         ).is_match(line)
+        // Matches FileCheck check lines
+        || static_regex!(
+            "\\s*// [a-zA-Z0-9-_]*:\\s.*"
+        ).is_match(line)
 }
 
 /// Returns `true` if `line` is allowed to be longer than the normal limit.
diff --git a/tests/codegen-llvm/gpu-launch-sized-workgroup-memory.rs b/tests/codegen-llvm/gpu-launch-sized-workgroup-memory.rs
new file mode 100644
index 0000000000000..649c06a0f55b9
--- /dev/null
+++ b/tests/codegen-llvm/gpu-launch-sized-workgroup-memory.rs
@@ -0,0 +1,31 @@
+// Checks that the GPU intrinsic to get launch-sized workgroup memory works.
+
+//@ revisions: amdgpu nvptx
+//@ compile-flags: --crate-type=rlib
+//
+//@ [amdgpu] compile-flags: --target amdgcn-amd-amdhsa -Ctarget-cpu=gfx900
+//@ [amdgpu] needs-llvm-components: amdgpu
+//@ [nvptx] compile-flags: --target nvptx64-nvidia-cuda
+//@ [nvptx] needs-llvm-components: nvptx
+//@ add-minicore
+#![feature(intrinsics, no_core, rustc_attrs)]
+#![no_core]
+
+extern crate minicore;
+
+#[rustc_intrinsic]
+#[rustc_nounwind]
+fn gpu_launch_sized_workgroup_mem<T>() -> *mut T;
+
+// amdgpu-DAG: @[[SMALL:[^ ]+]] = external addrspace(3) global [0 x i8], align 4
+// amdgpu-DAG: @[[BIG:[^ ]+]] = external addrspace(3) global [0 x i8], align 8
+// amdgpu: ret { ptr, ptr } { ptr addrspacecast (ptr addrspace(3) @[[SMALL]] to ptr), ptr addrspacecast (ptr addrspace(3) @[[BIG]] to ptr) }
+
+// nvptx: @[[BIG:[^ ]+]] = external addrspace(3) global [0 x i8], align 8
+// nvptx: ret { ptr, ptr } { ptr addrspacecast (ptr addrspace(3) @[[BIG]] to ptr), ptr addrspacecast (ptr addrspace(3) @[[BIG]] to ptr) }
+#[unsafe(no_mangle)]
+pub fn fun() -> (*mut i32, *mut f64) {
+    let small = gpu_launch_sized_workgroup_mem::<i32>();
+    let big = gpu_launch_sized_workgroup_mem::<f64>(); // Increase alignment to 8
+    (small, big)
+}