-
Notifications
You must be signed in to change notification settings - Fork 720
[XLA:GPU] Add SPIRV-LLVM-Translator and translation pass #11424
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,34 @@ | ||
| cc_library( | ||
| name = "spirv_llvm_translator", | ||
| srcs = glob([ | ||
| "lib/SPIRV/libSPIRV/*.cpp", | ||
| "lib/SPIRV/libSPIRV/*.hpp", | ||
| "lib/SPIRV/libSPIRV/*.h", | ||
| "lib/SPIRV/Mangler/*.cpp", | ||
| "lib/SPIRV/Mangler/*.h", | ||
| "lib/SPIRV/*.cpp", | ||
| "lib/SPIRV/*.hpp", | ||
| "lib/SPIRV/*.h", | ||
| ]), | ||
| hdrs = glob(["include/*"]), | ||
| includes = [ | ||
| "include/", | ||
| "lib/SPIRV/", | ||
| "lib/SPIRV/Mangler/", | ||
| "lib/SPIRV/libSPIRV/", | ||
| ], | ||
| visibility = ["//visibility:public"], | ||
| deps = [ | ||
| "@llvm-project//llvm:Analysis", | ||
| "@llvm-project//llvm:BitWriter", | ||
| "@llvm-project//llvm:CodeGen", | ||
| "@llvm-project//llvm:Core", | ||
| "@llvm-project//llvm:Demangle", | ||
| "@llvm-project//llvm:IRReader", | ||
| "@llvm-project//llvm:Linker", | ||
| "@llvm-project//llvm:Passes", | ||
| "@llvm-project//llvm:Support", | ||
| "@llvm-project//llvm:TransformUtils", | ||
| "@spirv_headers//:spirv_cpp_headers", | ||
| ], | ||
| ) |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,25 @@ | ||
| diff --git a/lib/SPIRV/SPIRVInternal.h b/lib/SPIRV/SPIRVInternal.h | ||
| index a828add8..924e13b4 100644 | ||
|
|
||
| The SPIR backend uses different addrspace representations compared with the nvptx backend. | ||
| We reorder the enum values here so that we can keep XLA LLVM codegen simple (avoiding | ||
| changing the addrspace based on the device backend everywhere). | ||
|
|
||
| --- a/lib/SPIRV/SPIRVInternal.h | ||
| +++ b/lib/SPIRV/SPIRVInternal.h | ||
| @@ -179,11 +179,12 @@ typedef SPIRVMap<Op, Op, IntBoolOpMapId> IntBoolOpMap; | ||
| "-v512:512:512-v1024:1024:1024" | ||
|
|
||
| enum SPIRAddressSpace { | ||
| - SPIRAS_Private, | ||
| + SPIRAS_Generic, | ||
| SPIRAS_Global, | ||
| - SPIRAS_Constant, | ||
| + SPIRAS_Internal, | ||
| SPIRAS_Local, | ||
| - SPIRAS_Generic, | ||
| + SPIRAS_Constant, | ||
| + SPIRAS_Private, | ||
| SPIRAS_GlobalDevice, | ||
| SPIRAS_GlobalHost, | ||
| SPIRAS_Input, | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -105,6 +105,11 @@ limitations under the License. | |
| #include "xla/stream_executor/cuda/cuda_asm_compiler.h" | ||
| #endif | ||
|
|
||
| #if TENSORFLOW_USE_SYCL | ||
| #include "LLVMSPIRVLib.h" | ||
| #include "LLVMSPIRVOpts.h" | ||
| #endif // TENSORFLOW_USE_SYCL | ||
|
|
||
| namespace xla { | ||
| namespace gpu { | ||
| namespace { | ||
|
|
@@ -452,7 +457,9 @@ absl::Status LinkAndOptimizeModule( | |
| llvm::CGSCCAnalysisManager cgam; | ||
| llvm::ModuleAnalysisManager mam; | ||
|
|
||
| fam.registerPass([&] { return target_machine->getTargetIRAnalysis(); }); | ||
| if (target_machine) { | ||
| fam.registerPass([&] { return target_machine->getTargetIRAnalysis(); }); | ||
| } | ||
|
|
||
| llvm::PipelineTuningOptions pto; | ||
| pto.SLPVectorization = true; | ||
|
|
@@ -1132,5 +1139,95 @@ absl::StatusOr<std::vector<uint8_t>> CompileToHsaco( | |
|
|
||
| } // namespace amdgpu | ||
|
|
||
| namespace { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Nit: Please add a blank line after this.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Done. |
||
|
|
||
| std::unique_ptr<llvm::TargetMachine> SPIRGetTargetMachine( | ||
| llvm::Triple target_triple, se::GpuComputeCapability gpu_version, | ||
| const DebugOptions& debug_options) { | ||
| return nullptr; | ||
| } | ||
|
|
||
| absl::Status SPIRTargetModuleLinker( | ||
| llvm::Module* module, se::GpuComputeCapability gpu_version, | ||
| const DebugOptions& debug_options, | ||
| const std::string& device_bitcode_dir_path) { | ||
| return absl::OkStatus(); | ||
| } | ||
|
|
||
| absl::StatusOr<std::string> EmitModuleToSpir( | ||
| llvm::Module* module, se::GpuComputeCapability gpu_version, | ||
| const DebugOptions& debug_options) { | ||
| #if TENSORFLOW_USE_SYCL | ||
| SPIRV::TranslatorOpts::ExtensionsStatusMap ExtensionsStatus; | ||
| SPIRV::TranslatorOpts opts(SPIRV::VersionNumber::MaximumVersion, | ||
| ExtensionsStatus); | ||
| opts.enableAllExtensions(); // enable all SPIR-V extension first | ||
|
|
||
| std::ostringstream oss; | ||
| std::string err; | ||
| bool success = llvm::writeSpirv(module, opts, oss, err); | ||
| if (!success) { | ||
| return xla::Internal("Fails to convert LLVM as SPIR-V: %s", err); | ||
| } | ||
| return oss.str(); | ||
| #else | ||
| return absl::UnimplementedError("Not implemented for SYCL"); | ||
| #endif | ||
| } | ||
|
|
||
| void SPIRBackendInit(const DebugOptions& debug_options) { | ||
| FeedLLVMWithFlags({ | ||
| "-slp-vectorize-hor=false", | ||
| "-slp-min-reg-size=64", | ||
| "-slp-max-reg-size=64", | ||
| }); | ||
|
|
||
| llvm_ir::InitializeLLVMCommandLineOptions( | ||
| debug_options.xla_backend_extra_options()); | ||
|
|
||
| llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry(); | ||
| InitializePasses(registry); | ||
| } | ||
|
|
||
| } // namespace | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Nit: Please add a blank line before this.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Done. |
||
|
|
||
| namespace spir { | ||
|
|
||
| absl::StatusOr<std::vector<uint8_t>> CompileToSpir( | ||
| llvm::Module* module, se::GpuComputeCapability gpu_version, | ||
| const DebugOptions& debug_options) { | ||
| std::string libdevice_dir_path; | ||
| static absl::once_flag backend_init_flag; | ||
| absl::call_once(backend_init_flag, SPIRBackendInit, debug_options); | ||
|
|
||
| std::string spir; | ||
| { | ||
| XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str()); | ||
|
|
||
| // If the module has no functions or globals, there's nothing to compile. | ||
| if (module->empty() && module->global_empty()) { | ||
| VLOG(2) << "Module '" << module->getName().str() | ||
| << "' is empty. Skipping compilation."; | ||
| return std::vector<uint8_t>(); | ||
| } | ||
|
|
||
| llvm::Triple default_target_triple("spir64-unknown-unknown"); | ||
| std::unique_ptr<llvm::TargetMachine> target_machine = | ||
| SPIRGetTargetMachine(default_target_triple, gpu_version, debug_options); | ||
|
|
||
| TF_RETURN_IF_ERROR(LinkAndOptimizeModule( | ||
| module, gpu_version, debug_options, libdevice_dir_path, | ||
| SPIRTargetModuleLinker, default_target_triple, target_machine.get(), | ||
| kDefaultInlineThreshold)); | ||
|
|
||
| // Lower optimized LLVM module to SPIR. | ||
| TF_ASSIGN_OR_RETURN(spir, | ||
| EmitModuleToSpir(module, gpu_version, debug_options)); | ||
| } | ||
| return std::vector<uint8_t>(spir.begin(), spir.end()); | ||
| } | ||
|
|
||
| } // namespace spir | ||
|
|
||
| } // namespace gpu | ||
| } // namespace xla | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Just curious, why do we need to reorder them?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The SPIR backend uses different addrspace representations compared with the nvptx backend. We reorder the enum values here so that we can keep XLA LLVM codegen simple (avoiding changing the addrspace based on the device backend everywhere).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is useful context for people who are unfamiliar with the backend.
Could you please add this explanation in this patch file (between lines 2 and 3)? I believe the patch tool will ignore anything above the
`--- a/lib/SPIRV/SPIRVInternal.h` line. E.g., see the suggestion below. There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Done.