From fbd4b382b27c6b3f5fe03de8c0e17ced0bb88e6e Mon Sep 17 00:00:00 2001 From: Robin Kuzmin Date: Mon, 8 Feb 2021 20:34:14 -0800 Subject: [PATCH 01/30] Added some math funcs implementation (#500) * Added sqrt implementation and tests. * Restructured the files. * Restructured sqrt. * CR changes * Fixed the Linux build break. * Added Log() and ArcTan2(). * Added pauli_to_string * CR changes. * CR changes. --- src/QirRuntime/lib/QIR/CMakeLists.txt | 1 + src/QirRuntime/lib/QIR/bridge-qis.ll | 103 ++- src/QirRuntime/lib/QIR/bridge-rt.ll | 16 +- src/QirRuntime/lib/QIR/intrinsicsMath.cpp | 31 + src/QirRuntime/lib/QIR/quantum__qis.hpp | 6 + src/QirRuntime/lib/QIR/strings.cpp | 3 + src/QirRuntime/public/CoreTypes.hpp | 4 +- src/QirRuntime/test/QIR-static/CMakeLists.txt | 5 +- .../test/QIR-static/compiler/Constants.qs | 37 + .../test/QIR-static/compiler/QirTarget.qs | 41 + src/QirRuntime/test/QIR-static/generate.py | 2 +- src/QirRuntime/test/QIR-static/qir-driver.cpp | 14 +- .../test/QIR-static/qir-test-arrays.qs | 16 +- .../test/QIR-static/qir-test-math.cpp | 25 + .../test/QIR-static/qir-test-math.qs | 64 ++ .../test/QIR-static/qir-test-qsharp.ll | 825 ++++++++++++++---- .../QIR-static/qir-test-qubits-results.qs | 2 + .../test/QIR-static/qir-test-strings.cpp | 15 + .../test/QIR-static/qir-test-strings.qs | 20 + 19 files changed, 1034 insertions(+), 196 deletions(-) create mode 100644 src/QirRuntime/lib/QIR/intrinsicsMath.cpp create mode 100644 src/QirRuntime/test/QIR-static/compiler/Constants.qs create mode 100644 src/QirRuntime/test/QIR-static/qir-test-math.cpp create mode 100644 src/QirRuntime/test/QIR-static/qir-test-math.qs create mode 100644 src/QirRuntime/test/QIR-static/qir-test-strings.cpp create mode 100644 src/QirRuntime/test/QIR-static/qir-test-strings.qs diff --git a/src/QirRuntime/lib/QIR/CMakeLists.txt b/src/QirRuntime/lib/QIR/CMakeLists.txt index 58701c2d2da..37455ba9634 100644 --- a/src/QirRuntime/lib/QIR/CMakeLists.txt +++ 
b/src/QirRuntime/lib/QIR/CMakeLists.txt @@ -70,6 +70,7 @@ add_dependencies(qir-rt ${bridge_rt_target}) # set(qis_sup_source_files "intrinsics.cpp" + "intrinsicsMath.cpp" ) add_library(qir-qis-support ${qis_sup_source_files}) diff --git a/src/QirRuntime/lib/QIR/bridge-qis.ll b/src/QirRuntime/lib/QIR/bridge-qis.ll index 9a011f112a4..1c93d58f0fe 100644 --- a/src/QirRuntime/lib/QIR/bridge-qis.ll +++ b/src/QirRuntime/lib/QIR/bridge-qis.ll @@ -13,7 +13,7 @@ %Range = type { i64, i64, i64 } %Result = type opaque %String = type opaque -%Pauli = type {i2} +%Pauli = type i2 ;======================================================================================================================= ; Native types @@ -29,9 +29,7 @@ %struct.QirCallable = type opaque %struct.QirRange = type { i64, i64, i64 } %struct.QirString = type opaque - -; Assumptions: -; %PauliId = type {i32} +%PauliId = type i32 ;=============================================================================== ; declarations of the native methods this bridge delegates to @@ -139,53 +137,53 @@ define %Result* @__quantum__qis__measure__body(%Array* %.paulis, %Array* %.qubit ret %Result* %.r } -define void @__quantum__qis__r__body(i2 %.pauli, double %theta, %Qubit* %.q) { +define void @__quantum__qis__r__body(%Pauli %.pauli, double %theta, %Qubit* %.q) { %q = bitcast %Qubit* %.q to %class.QUBIT* - %pauli = zext i2 %.pauli to i32 - call void @quantum__qis__r__body(i32 %pauli, double %theta, %class.QUBIT* %q) + %pauli = zext %Pauli %.pauli to %PauliId + call void @quantum__qis__r__body(%PauliId %pauli, double %theta, %class.QUBIT* %q) ret void } -define void @__quantum__qis__r__adj(i2 %.pauli, double %theta, %Qubit* %.q) { +define void @__quantum__qis__r__adj(%Pauli %.pauli, double %theta, %Qubit* %.q) { %q = bitcast %Qubit* %.q to %class.QUBIT* - %pauli = zext i2 %.pauli to i32 - call void @quantum__qis__r__adj(i32 %pauli, double %theta, %class.QUBIT* %q) + %pauli = zext %Pauli %.pauli to %PauliId + call void 
@quantum__qis__r__adj(%PauliId %pauli, double %theta, %class.QUBIT* %q) ret void } -define void @__quantum__qis__r__ctl(%Array* %.ctls, {i2, double, %Qubit*}* %.args) { +define void @__quantum__qis__r__ctl(%Array* %.ctls, {%Pauli, double, %Qubit*}* %.args) { %ctls = bitcast %Array* %.ctls to %struct.QirArray* - %.ppauli = getelementptr inbounds {i2, double, %Qubit*}, {i2, double, %Qubit*}* %.args, i32 0, i32 0 - %.pauli = load i2, i2* %.ppauli - %pauli = zext i2 %.pauli to i32 + %.ppauli = getelementptr inbounds {%Pauli, double, %Qubit*}, {%Pauli, double, %Qubit*}* %.args, i32 0, i32 0 + %.pauli = load %Pauli, %Pauli* %.ppauli + %pauli = zext %Pauli %.pauli to %PauliId - %.ptheta = getelementptr inbounds {i2, double, %Qubit*}, {i2, double, %Qubit*}* %.args, i32 0, i32 1 + %.ptheta = getelementptr inbounds {%Pauli, double, %Qubit*}, {%Pauli, double, %Qubit*}* %.args, i32 0, i32 1 %theta = load double, double* %.ptheta - %.pq = getelementptr inbounds {i2, double, %Qubit*}, {i2, double, %Qubit*}* %.args, i32 0, i32 2 + %.pq = getelementptr inbounds {%Pauli, double, %Qubit*}, {%Pauli, double, %Qubit*}* %.args, i32 0, i32 2 %.q = load %Qubit*, %Qubit** %.pq %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__r__ctl(%struct.QirArray* %ctls, i32 %pauli, double %theta, %class.QUBIT* %q) + call void @quantum__qis__r__ctl(%struct.QirArray* %ctls, %PauliId %pauli, double %theta, %class.QUBIT* %q) ret void } -define void @__quantum__qis__r__ctladj(%Array* %.ctls, {i2, double, %Qubit*}* %.args) { +define void @__quantum__qis__r__ctladj(%Array* %.ctls, {%Pauli, double, %Qubit*}* %.args) { %ctls = bitcast %Array* %.ctls to %struct.QirArray* - %.ppauli = getelementptr inbounds {i2, double, %Qubit*}, {i2, double, %Qubit*}* %.args, i32 0, i32 0 - %.pauli = load i2, i2* %.ppauli - %pauli = zext i2 %.pauli to i32 + %.ppauli = getelementptr inbounds {%Pauli, double, %Qubit*}, {%Pauli, double, %Qubit*}* %.args, i32 0, i32 0 + %.pauli = load %Pauli, %Pauli* %.ppauli + 
%pauli = zext %Pauli %.pauli to %PauliId - %.ptheta = getelementptr inbounds {i2, double, %Qubit*}, {i2, double, %Qubit*}* %.args, i32 0, i32 1 + %.ptheta = getelementptr inbounds {%Pauli, double, %Qubit*}, {%Pauli, double, %Qubit*}* %.args, i32 0, i32 1 %theta = load double, double* %.ptheta - %.pq = getelementptr inbounds {i2, double, %Qubit*}, {i2, double, %Qubit*}* %.args, i32 0, i32 2 + %.pq = getelementptr inbounds {%Pauli, double, %Qubit*}, {%Pauli, double, %Qubit*}* %.args, i32 0, i32 2 %.q = load %Qubit*, %Qubit** %.pq %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__r__ctladj(%struct.QirArray* %ctls, i32 %pauli, double %theta, %class.QUBIT* %q) + call void @quantum__qis__r__ctladj(%struct.QirArray* %ctls, %PauliId %pauli, double %theta, %class.QUBIT* %q) ret void } @@ -281,3 +279,60 @@ define void @__quantum__qis__z__ctl(%Array* %.ctls, %Qubit* %.q) { } +;=============================================================================== +; quantum.qis math functions +; + +; LLVM intrinsics (https://llvm.org/docs/LangRef.html): +declare double @llvm.sqrt.f64(double %.val) +declare double @llvm.log.f64(double %Val) + +; Native implementations: +declare i1 @quantum__qis__isnan__body(double %d) +declare double @quantum__qis__infinity__body() +declare i1 @quantum__qis__isinf__body(double %d) +declare double @quantum__qis__arctan2__body(double %y, double %x) + +; API for the user code: +define double @__quantum__qis__nan__body() { ; Q#: function NAN() : Double http://www.cplusplus.com/reference/cmath/nan-function/ + %result = call double @llvm.sqrt.f64(double -1.0) ; sqrt() -> NaN + ret double %result +} + +define i1 @__quantum__qis__isnan__body(double %d) { ; http://www.cplusplus.com/reference/cmath/isnan/ + %result = call i1 @quantum__qis__isnan__body(double %d) + ret i1 %result +} + +define double @__quantum__qis__infinity__body() { ; https://en.cppreference.com/w/c/numeric/math/INFINITY + %result = call double 
@quantum__qis__infinity__body() + ret double %result +} + +define i1 @__quantum__qis__isinf__body(double %d) { ; https://en.cppreference.com/w/cpp/numeric/math/isinf + %result = call i1 @quantum__qis__isinf__body(double %d) + ret i1 %result +} + +define double @__quantum__qis__sqrt__body(double %d) { ; https://en.cppreference.com/w/cpp/numeric/math/sqrt + %result = call double @llvm.sqrt.f64(double %d) + ret double %result +} + +define double @__quantum__qis__log__body(double %d) { ; https://en.cppreference.com/w/cpp/numeric/math/log + %result = call double @llvm.log.f64(double %d) + ret double %result +} + +define i1 @__quantum__qis__isnegativeinfinity__body(double %d) { ; Q#: function IsNegativeInfinity(d : Double) : Bool + ; https://en.cppreference.com/w/cpp/numeric/math/log https://llvm.org/docs/LangRef.html#llvm-log-intrinsic + %negInf = call double @llvm.log.f64(double 0.0) ; ln(0) -> (-infinity) + %result = fcmp oeq double %negInf, %d ; %result = (%negInf == %d) + ret i1 %result +} + +define double @__quantum__qis__arctan2__body(double %y, double %x) { ; Q#: function ArcTan2 (y : Double, x : Double) : Double + ; https://en.cppreference.com/w/cpp/numeric/math/atan2 + %result = call double @quantum__qis__arctan2__body(double %y, double %x) + ret double %result +} diff --git a/src/QirRuntime/lib/QIR/bridge-rt.ll b/src/QirRuntime/lib/QIR/bridge-rt.ll index dc9ea4995e0..73a33d737a6 100644 --- a/src/QirRuntime/lib/QIR/bridge-rt.ll +++ b/src/QirRuntime/lib/QIR/bridge-rt.ll @@ -11,7 +11,7 @@ %Result = type opaque %String = type opaque %Tuple = type opaque -%Pauli = type {i2} +%Pauli = type i2 ;======================================================================================================================= ; Native types @@ -27,6 +27,7 @@ %"struct.QirCallable" = type opaque %"struct.QirRange" = type { i64, i64, i64 } %"struct.QirString" = type opaque +%PauliId = type i32 ; %Tuple* is mapped to i8* 
;======================================================================================================================= @@ -97,7 +98,7 @@ declare %"struct.QirString"* @quantum__rt__int_to_string(i64) declare %"struct.QirString"* @quantum__rt__double_to_string(double) declare %"struct.QirString"* @quantum__rt__bool_to_string(i1) declare %"struct.QirString"* @quantum__rt__result_to_string(%class.RESULT*) -declare %"struct.QirString"* @quantum__rt__pauli_to_string(i32) +declare %"struct.QirString"* @quantum__rt__pauli_to_string(%PauliId) declare %"struct.QirString"* @quantum__rt__qubit_to_string(%class.QUBIT*) declare %"struct.QirString"* @quantum__rt__range_to_string(%"struct.QirRange"* dereferenceable(24) %range) @@ -378,10 +379,9 @@ define void @__quantum__rt__callable_memory_management(i32 %index, %Callable* %. ; strings bridge ; ; NYI: -;define %String* @__quantum__rt__pauli_to_string(%Pauli) ; need to check that the type is lowered correctly ;define %String* @__quantum__rt__bigint_to_string(%BigInt*) -define %String* @__quantum__rt__string_create(i8* %null_terminated_buffer) { +define %String* @__quantum__rt__string_create(i32 %ignoredStrLength, i8* %null_terminated_buffer) { %str = call %"struct.QirString"* @quantum__rt__string_create(i8* %null_terminated_buffer) %.str = bitcast %"struct.QirString"* %str to %String* ret %String* %.str @@ -435,6 +435,13 @@ define %String* @__quantum__rt__result_to_string(%Result* %.r) { ret %String* %.str } +define %String* @__quantum__rt__pauli_to_string(%Pauli %.pauli) { + %pauli = zext %Pauli %.pauli to %PauliId + %str = call %"struct.QirString"* @quantum__rt__pauli_to_string(%PauliId %pauli) + %.str = bitcast %"struct.QirString"* %str to %String* + ret %String* %.str +} + define %String* @__quantum__rt__qubit_to_string(%Qubit* %.q) { %q = bitcast %Qubit* %.q to %"class.QUBIT"* %str = call %"struct.QirString"* @quantum__rt__qubit_to_string(%"class.QUBIT"* %q) @@ -451,7 +458,6 @@ define %String* 
@__quantum__rt__range_to_string(%Range %.range) { ret %String* %.str } - ;------------------------------------------------------------------------------ ; bigints bridge ; diff --git a/src/QirRuntime/lib/QIR/intrinsicsMath.cpp b/src/QirRuntime/lib/QIR/intrinsicsMath.cpp new file mode 100644 index 00000000000..a3b97e420e9 --- /dev/null +++ b/src/QirRuntime/lib/QIR/intrinsicsMath.cpp @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include +#include "quantum__qis.hpp" + +extern "C" +{ + +// Implementations: +bool quantum__qis__isnan__body(double d) +{ + return std::isnan(d); // https://en.cppreference.com/w/cpp/numeric/math/isnan +} + +double quantum__qis__infinity__body() +{ + return INFINITY; // https://en.cppreference.com/w/c/numeric/math/INFINITY +} + +bool quantum__qis__isinf__body(double d) +{ + return std::isinf(d); // https://en.cppreference.com/w/cpp/numeric/math/isinf +} + +double quantum__qis__arctan2__body(double y, double x) +{ + return std::atan2(y, x); // https://en.cppreference.com/w/cpp/numeric/math/atan2 +} + +} // extern "C" diff --git a/src/QirRuntime/lib/QIR/quantum__qis.hpp b/src/QirRuntime/lib/QIR/quantum__qis.hpp index 8f90770c9c2..50e8c249016 100644 --- a/src/QirRuntime/lib/QIR/quantum__qis.hpp +++ b/src/QirRuntime/lib/QIR/quantum__qis.hpp @@ -59,4 +59,10 @@ extern "C" QIR_SHARED_API void quantum__qis__y__ctl(QirArray*, QUBIT*); // NOLINT QIR_SHARED_API void quantum__qis__z__body(QUBIT*); // NOLINT QIR_SHARED_API void quantum__qis__z__ctl(QirArray*, QUBIT*); // NOLINT + + QIR_SHARED_API bool quantum__qis__isnan__body(double d); // NOLINT + QIR_SHARED_API double quantum__qis__infinity__body(); // NOLINT + QIR_SHARED_API bool quantum__qis__isinf__body(double d); // NOLINT + QIR_SHARED_API double quantum__qis__arctan2__body(double y, double x); // NOLINT + } \ No newline at end of file diff --git a/src/QirRuntime/lib/QIR/strings.cpp b/src/QirRuntime/lib/QIR/strings.cpp index 
3885bbdf3cb..0435329a1ef 100644 --- a/src/QirRuntime/lib/QIR/strings.cpp +++ b/src/QirRuntime/lib/QIR/strings.cpp @@ -135,7 +135,10 @@ extern "C" return quantum__rt__string_create("PauliY"); case PauliId_Z: return quantum__rt__string_create("PauliZ"); + default: + break; } + return quantum__rt__string_create(""); } // Returns a string representation of the range. diff --git a/src/QirRuntime/public/CoreTypes.hpp b/src/QirRuntime/public/CoreTypes.hpp index 6dd527f7f85..99319f8beaf 100644 --- a/src/QirRuntime/public/CoreTypes.hpp +++ b/src/QirRuntime/public/CoreTypes.hpp @@ -1,5 +1,7 @@ #pragma once +#include + // The core types will be exposed in the C-interfaces for interop, thus no // namespaces or scoped enums can be used to define them. @@ -27,7 +29,7 @@ enum ResultValue /*============================================================================== PauliId matrices ==============================================================================*/ -enum PauliId +enum PauliId : int32_t { PauliId_I = 0, PauliId_X = 1, diff --git a/src/QirRuntime/test/QIR-static/CMakeLists.txt b/src/QirRuntime/test/QIR-static/CMakeLists.txt index 7a7e99eb1fb..ae056c3453b 100644 --- a/src/QirRuntime/test/QIR-static/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-static/CMakeLists.txt @@ -20,7 +20,10 @@ add_custom_target(qir_static_test_lib DEPENDS ${QIR_TESTS_LIBS}) # The executable target for QIR tests triggers the custom actions to compile ll files # add_executable(qir-static-tests - qir-driver.cpp) + qir-driver.cpp + qir-test-math.cpp + qir-test-strings.cpp +) target_link_libraries(qir-static-tests PUBLIC ${QIR_TESTS_LIBS} diff --git a/src/QirRuntime/test/QIR-static/compiler/Constants.qs b/src/QirRuntime/test/QIR-static/compiler/Constants.qs new file mode 100644 index 00000000000..1192459dbb2 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/compiler/Constants.qs @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+
+// these are all the static methods and const fields from System.Math class of .NET CLR
+// that are not exposed as language operators and are relevant within type System.
+// If there are two versions of the function for Int and Double types, the corresponding
+// functions have suffix I or D. ExpD also has a suffix to avoid name clash with Primitives.Exp.
+
+namespace Microsoft.Quantum.Math {
+
+    /// # Summary
+    /// Returns the natural logarithmic base to double-precision.
+    ///
+    /// # Output
+    /// A double-precision approximation of the natural logarithmic base,
+    /// $e \approx 2.7182818284590452354$.
+    ///
+    /// # See Also
+    /// - Microsoft.Quantum.Math.PI
+    function E() : Double {
+        return 2.7182818284590452354;
+    }
+
+    /// # Summary
+    /// Represents the ratio of the circumference of a circle to its diameter.
+    ///
+    /// # Output
+    /// A double-precision approximation of the circumference of a circle
+    /// to its diameter, $\pi \approx 3.14159265358979323846$.
+    ///
+    /// # See Also
+    /// - Microsoft.Quantum.Math.E
+    function PI() : Double {
+        return 3.14159265358979323846;
+    }
+
+}
diff --git a/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs b/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs
index 9b8a7833579..c117bdd0667 100644
--- a/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs
+++ b/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs
@@ -5,6 +5,47 @@ namespace Microsoft.Quantum.Intrinsic {
     open Microsoft.Quantum.Targeting;
+    @Inline()
+    function NAN() : Double {
+        body intrinsic;
+    }
+
+    @Inline()
+    function IsNan(d: Double) : Bool {
+        body intrinsic;
+    }
+
+    @Inline()
+    function INFINITY() : Double {
+        body intrinsic;
+    }
+
+    @Inline()
+    function IsInf(d: Double) : Bool {
+        body intrinsic;
+    }
+
+    @Inline()
+    function IsNegativeInfinity(d : Double) : Bool {
+        body intrinsic;
+    }
+
+    @Inline()
+    function Sqrt(d : Double) : Double {
+        body intrinsic;
+    }
+
+    @Inline()
+    function Log(d : Double) : Double {
+        body intrinsic;
+    }
+
+    
@Inline() + function ArcTan2(y : Double, x : Double) : Double { + body intrinsic; + } + + operation X(qb : Qubit) : Unit is Adj + Ctl { body intrinsic; diff --git a/src/QirRuntime/test/QIR-static/generate.py b/src/QirRuntime/test/QIR-static/generate.py index af6daa37c5e..99242f689b4 100644 --- a/src/QirRuntime/test/QIR-static/generate.py +++ b/src/QirRuntime/test/QIR-static/generate.py @@ -39,7 +39,7 @@ def log(message): output_file = file_name command = (qsc + " build --qir s --build-exe --input " + files_to_process + - " compiler\\qircore.qs compiler\\qirtarget.qs --proj " + output_file) + " compiler\\qircore.qs compiler\\qirtarget.qs compiler\\Constants.qs --proj " + output_file) log("Executing: " + command) subprocess.run(command, shell = True) diff --git a/src/QirRuntime/test/QIR-static/qir-driver.cpp b/src/QirRuntime/test/QIR-static/qir-driver.cpp index bac6bc26414..c6059c93a8e 100644 --- a/src/QirRuntime/test/QIR-static/qir-driver.cpp +++ b/src/QirRuntime/test/QIR-static/qir-driver.cpp @@ -57,8 +57,8 @@ extern "C" int64_t Microsoft__Quantum__Testing__QIR__Test_Arrays( // NOLINT int64_t* array, int64_t index, int64_t val, - bool dummy); -TEST_CASE("QIR: Using 1D arrays", "[qir]") + bool compilerDecoy); +TEST_CASE("QIR: Using 1D arrays", "[qir][qir.arr1d]") { // re-enable tracking when https://github.com/microsoft/qsharp-compiler/issues/844 is fixed QirContextScope qirctx(nullptr, false /*trackAllocatedObjects*/); @@ -152,7 +152,7 @@ struct QubitsResultsTestSimulator : public Microsoft::Quantum::SimulatorStub return reinterpret_cast(1); } }; -TEST_CASE("QIR: allocating and releasing qubits and results", "[qir]") +TEST_CASE("QIR: allocating and releasing qubits and results", "[qir][qir.qubit][qir.result]") { unique_ptr sim = make_unique(); QirContextScope qirctx(sim.get(), true /*trackAllocatedObjects*/); @@ -182,7 +182,7 @@ TEST_CASE("QIR: allocating and releasing qubits and results", "[qir]") // that is written to the original array at [1,1,1] and then 
retrieved from [1,1]. // Thus, all three dimensions must be at least 2. extern "C" int64_t TestMultidimArrays(char value, int64_t dim0, int64_t dim1, int64_t dim2); -TEST_CASE("QIR: multidimensional arrays", "[qir]") +TEST_CASE("QIR: multidimensional arrays", "[qir][qir.arrMultid]") { QirContextScope qirctx(nullptr, true /*trackAllocatedObjects*/); @@ -195,7 +195,7 @@ TEST_CASE("QIR: multidimensional arrays", "[qir]") // Manually authored QIR to test dumping range [0..2..6] into string and then raising a failure with it extern "C" void TestFailWithRangeString(int64_t start, int64_t step, int64_t end); -TEST_CASE("QIR: Report range in a failure message", "[qir]") +TEST_CASE("QIR: Report range in a failure message", "[qir][qir.range]") { QirContextScope qirctx(nullptr, true /*trackAllocatedObjects*/); @@ -215,7 +215,7 @@ TEST_CASE("QIR: Report range in a failure message", "[qir]") #if 0 // TODO: Q# compiler crashes generating QIR for TestPartials // TestPartials subtracts the second argument from the first and returns the result. 
extern "C" int64_t Microsoft__Quantum__Testing__QIR__TestPartials__body(int64_t, int64_t); // NOLINT -TEST_CASE("QIR: Partial application of a callable", "[qir]") +TEST_CASE("QIR: Partial application of a callable", "[qir][qir.partCallable]") { QirContextScope qirctx(nullptr, true /*trackAllocatedObjects*/); @@ -309,7 +309,7 @@ extern "C" void __quantum__qis__k__ctl(QirArray* controls, Qubit q) // NOLINT { g_ctrqapi->ControlledX(controls->count, reinterpret_cast(controls->buffer), q); } -TEST_CASE("QIR: application of nested controlled functor", "[qir]") +TEST_CASE("QIR: application of nested controlled functor", "[qir][qir.functor]") { unique_ptr qapi = make_unique(); QirContextScope qirctx(qapi.get(), true /*trackAllocatedObjects*/); diff --git a/src/QirRuntime/test/QIR-static/qir-test-arrays.qs b/src/QirRuntime/test/QIR-static/qir-test-arrays.qs index 9752c047d00..65d05ca8ef2 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-arrays.qs +++ b/src/QirRuntime/test/QIR-static/qir-test-arrays.qs @@ -3,8 +3,11 @@ namespace Microsoft.Quantum.Testing.QIR { + open Microsoft.Quantum.Testing.QIR.Math; + open Microsoft.Quantum.Testing.QIR.Str; + @EntryPoint() - operation Test_Arrays(array : Int[], index : Int, val : Int, dummy : Bool) : Int + operation Test_Arrays(array : Int[], index : Int, val : Int, compilerDecoy : Bool) : Int { // exercise __quantum__rt__array_copy mutable local = array; @@ -30,12 +33,17 @@ namespace Microsoft.Quantum.Testing.QIR } // The purpose of this block is to keep the Q# compiler from optimizing away other tests when generating QIR - if (dummy) + if (compilerDecoy) { let res1 = TestControlled(); - //Q# compiler crashes if both TestControlled and TestPartials are enabled - //let res2 = TestPartials(17, 42); + let res2 = TestPartials(17, 42); let res3 = Test_Qubit_Result_Management(); + + // Math tests: + let res4 = SqrtTest(); + let res5 = LogTest(); + let res6 = ArcTan2Test(); + let res7 = PauliToStringTest(); } return sum; diff --git 
a/src/QirRuntime/test/QIR-static/qir-test-math.cpp b/src/QirRuntime/test/QIR-static/qir-test-math.cpp new file mode 100644 index 00000000000..27ccea024f0 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-math.cpp @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#include + +#include "catch.hpp" + +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__LogTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body(); // NOLINT + +TEST_CASE("QIR: Math.Sqrt", "[qir.math][qir.Math.Sqrt]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body()); +} + +TEST_CASE("QIR: Math.Log", "[qir.math][qir.Math.Log]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__LogTest__body()); +} + +TEST_CASE("QIR: Math.ArcTan2", "[qir.math][qir.Math.ArcTan2]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body()); +} + diff --git a/src/QirRuntime/test/QIR-static/qir-test-math.qs b/src/QirRuntime/test/QIR-static/qir-test-math.qs new file mode 100644 index 00000000000..bdca70c30b9 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-math.qs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.QIR.Math { + + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Math; // E() + + function SqrtTest() : Int { + if 2.0 != Sqrt( 4.0) { return 1; } // The return value indicates which test case has failed. + if 3.0 != Sqrt( 9.0) { return 2; } + if 10.0 != Sqrt(100.0) { return 3; } + + if not IsNan(Sqrt(-5.0)) { return 4; } + if not IsNan(Sqrt(NAN())) { return 5; } + if not IsInf(Sqrt(INFINITY())) { return 6; } + + return 0; + } + + function LogTest() : Int { + if 1.0 != Log(E()) { return 1; } // ln(e) -> 1 // The return value indicates which test case has failed. 
+        if 2.0 != Log(E() * E()) { return 2; }  // ln(e^2) -> 2
+
+        if not IsNegativeInfinity(Log(0.0)) { return 3; }   // ln(0) -> (-infinity)
+        if not IsNan(Log(-5.0)) { return 4; }               // ln(-5.0) -> NaN
+        if not IsNan(Log(NAN())) { return 5; }              // ln(NaN) -> NaN
+        if not IsInf(Log(INFINITY())) { return 6; }         // ln(+infinity) -> +infinity
+
+        return 0;
+    }
+
+    function ArcTan2Test() : Int {
+
+        // function ArcTan2(y : Double, x : Double) : Double
+
+        if 0.0 != ArcTan2( 0.0, 1.0 ) { return 1; }     // The return value indicates which test case has failed.
+        if PI() != ArcTan2( 0.0, -1.0 ) { return 2; }
+        if PI()/2.0 != ArcTan2( 1.0, 0.0 ) { return 3; }
+        if -PI()/2.0 != ArcTan2(-1.0, 0.0 ) { return 4; }
+
+        if PI()/4.0 != ArcTan2( 1.0, 1.0 ) { return 5; }
+        if PI()*3.0/4.0 != ArcTan2( 1.0, -1.0 ) { return 6; }
+        if -PI()*3.0/4.0 != ArcTan2(-1.0, -1.0 ) { return 7; }
+        if -PI()/4.0 != ArcTan2(-1.0, 1.0 ) { return 8; }
+
+        if 0.0 != ArcTan2( 0.0, 0.0 ) { return 9; }
+
+        // Fails because of lack of precision:
+        // if PI()/6.0 != ArcTan2( 1.0, Sqrt(3.0) ) { return 10; }     // tan(Pi/6) = sin(Pi/6) / cos(Pi/6) = (1/2) / (Sqrt(3)/2) = 1/Sqrt(3) = y/x. ArcTan2(1.0, Sqrt(3)) -> Pi/6
+
+        if not IsNan(ArcTan2(NAN(), 0.0) ) { return 11; }
+        if not IsNan(ArcTan2( 0.0, NAN()) ) { return 12; }
+        if not IsNan(ArcTan2(NAN(), NAN()) ) { return 13; }
+
+        // The infinity cases show discrepancy between
+        // https://docs.microsoft.com/en-us/dotnet/api/system.math.atan2?view=net-5.0
+        // and https://en.cppreference.com/w/cpp/numeric/math/atan2 .
+ + return 0; + } + +} + diff --git a/src/QirRuntime/test/QIR-static/qir-test-qsharp.ll b/src/QirRuntime/test/QIR-static/qir-test-qsharp.ll index 774cf5774ac..a619a2a6218 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-qsharp.ll +++ b/src/QirRuntime/test/QIR-static/qir-test-qsharp.ll @@ -2,9 +2,10 @@ %Result = type opaque %Range = type { i64, i64, i64 } %Tuple = type opaque +%Callable = type opaque %Qubit = type opaque %Array = type opaque -%Callable = type opaque +%String = type opaque @ResultZero = external global %Result* @ResultOne = external global %Result* @@ -16,111 +17,14 @@ @Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] @PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] @MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] - -define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -declare void @__quantum__qis__k__body(%Qubit*) - -define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* 
%__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare void @__quantum__rt__array_update_alias_count(%Array*, i64) - -declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { -entry: - %bases__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__inline__1, i64 0) - %1 = bitcast i8* %0 to i2* - %2 = load i2, i2* @PauliZ - store i2 %2, i2* %1 - call void @__quantum__rt__array_update_alias_count(%Array* %bases__inline__1, i64 1) - %qubits__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__inline__1, i64 0) - %4 = bitcast i8* %3 to %Qubit** - store %Qubit* %qb, %Qubit** %4 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits__inline__1, i64 1) - %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases__inline__1, %Array* %qubits__inline__1) - call void @__quantum__rt__array_update_alias_count(%Array* %bases__inline__1, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits__inline__1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %bases__inline__1, i64 -1) - call void 
@__quantum__rt__array_update_reference_count(%Array* %qubits__inline__1, i64 -1) - ret %Result* %5 -} - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - -declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i64) - -define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 -} - -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qb) - ret void -} - -declare void @__quantum__qis__x__body(%Qubit*) - -define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i64 -1) - ret void -} +@Microsoft__Quantum__Testing__QIR__Subtract = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] +@0 = internal constant [20 x i8] c"Pauli value: PauliI\00" +@1 = internal constant [14 x i8] c"Pauli value: \00" +@2 = internal constant [7 x i8] c"PauliX\00" +@3 = internal constant [7 x i8] c"PauliY\00" +@4 = internal constant [7 x i8] c"PauliZ\00" define i64 @Microsoft__Quantum__Testing__QIR__TestControlled__body() { entry: @@ -633,17 +537,128 @@ declare %Array* @__quantum__rt__qubit_allocate_array(i64) declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + %2 = load i2, i2* @PauliZ + store i2 %2, i2* %1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %qb, %Qubit** %4 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %5 = call %Result* 
@__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i64 -1) + ret %Result* %5 +} + declare i1 @__quantum__rt__result_equal(%Result*, %Result*) +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + declare void @__quantum__rt__qubit_release(%Qubit*) declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + declare void @__quantum__rt__result_update_reference_count(%Result*, i64) -define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %dummy) { +define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { +entry: + %0 = sub i64 %from, %what + ret i64 %0 +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { +entry: + %0 = srem i64 %n, 2 + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__k__body(%Qubit* %q) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +declare void @__quantum__qis__k__body(%Qubit*) + +define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { +entry: + %0 = srem i64 %n, 2 + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__k__body(%Qubit* %q) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define void 
@Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) + %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 + %q = load %Qubit*, %Qubit** %1 + %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 + %n = load i64, i64* %2 + %3 = srem i64 %n, 2 + %4 = icmp eq i64 %3, 1 + br i1 %4, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) + call void @__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) + ret void +} + +declare void @__quantum__rt__array_update_alias_count(%Array*, i64) + +declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 + %q = load %Qubit*, %Qubit** %1 + %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 + %n = load i64, i64* %2 + %3 = srem i64 %n, 2 + %4 = icmp eq i64 %3, 1 + br i1 %4, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define i64 
@Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %compilerDecoy) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) %local = alloca %Array* @@ -699,11 +714,16 @@ exiting__1: ; preds = %body__1 br label %header__1 exit__1: ; preds = %header__1 - br i1 %dummy, label %then0__1, label %continue__1 + br i1 %compilerDecoy, label %then0__1, label %continue__1 then0__1: ; preds = %exit__1 %res1 = call i64 @Microsoft__Quantum__Testing__QIR__TestControlled__body() + %res2 = call i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 17, i64 42) %res3 = call i1 @Microsoft__Quantum__Testing__QIR__Test_Qubit_Result_Management__body() + %res4 = call i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() + %res5 = call i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() + %res6 = call i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() + %res7 = call i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() br label %continue__1 continue__1: ; preds = %then0__1, %exit__1 @@ -730,14 +750,44 @@ declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) +define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* + %2 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %1, i64 0, i32 0 + %3 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %1, i64 0, i32 1 + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + store %Callable* %4, %Callable** %2 + store i64 %x, i64* %3 + %subtractor = call 
%Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %0) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64 }* + %7 = getelementptr { i64 }, { i64 }* %6, i64 0, i32 0 + store i64 %y, i64* %7 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i64 }* + %10 = getelementptr { i64 }, { i64 }* %9, i64 0, i32 0 + %11 = load i64, i64* %10 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %subtractor, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + ret i64 %11 +} + define i1 @Microsoft__Quantum__Testing__QIR__Test_Qubit_Result_Management__body() { entry: %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %1 = bitcast i8* %0 to %Qubit** - %qb__inline__1 = load %Qubit*, %Qubit** %1 - call void @__quantum__qis__x__body(%Qubit* %qb__inline__1) + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__x__body(%Qubit* %qb) %q = call %Qubit* 
@__quantum__rt__qubit_allocate() %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %3 = bitcast i8* %2 to %Qubit** @@ -769,81 +819,550 @@ continue__1: ; preds = %then0__1, %entry ret i1 %14 } -define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { +define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() { entry: - %0 = srem i64 %n, 2 - %1 = icmp eq i64 %0, 1 + %0 = call double @__quantum__qis__sqrt__body(double 4.000000e+00) + %1 = fcmp one double 2.000000e+00, %0 br i1 %1, label %then0__1, label %continue__1 then0__1: ; preds = %entry - call void @__quantum__qis__k__body(%Qubit* %q) - br label %continue__1 + ret i64 1 + +continue__1: ; preds = %entry + %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00) + %3 = fcmp one double 3.000000e+00, %2 + br i1 %3, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + ret i64 2 + +continue__2: ; preds = %continue__1 + %4 = call double @__quantum__qis__sqrt__body(double 1.000000e+02) + %5 = fcmp one double 1.000000e+01, %4 + br i1 %5, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + ret i64 3 + +continue__3: ; preds = %continue__2 + %d__4 = call double @__quantum__qis__sqrt__body(double -5.000000e+00) + %6 = call i1 @__quantum__qis__isnan__body(double %d__4) + %7 = xor i1 %6, true + br i1 %7, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + ret i64 4 + +continue__4: ; preds = %continue__3 + %d__5 = call double @__quantum__qis__nan__body() + %d__6 = call double @__quantum__qis__sqrt__body(double %d__5) + %8 = call i1 @__quantum__qis__isnan__body(double %d__6) + %9 = xor i1 %8, true + br i1 %9, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + ret i64 5 + +continue__5: ; preds = %continue__4 + %d__7 = call double @__quantum__qis__infinity__body() + %d__8 = call double @__quantum__qis__sqrt__body(double %d__7) + %10 = call i1 @__quantum__qis__isinf__body(double 
%d__8) + %11 = xor i1 %10, true + br i1 %11, label %then0__6, label %continue__6 -continue__1: ; preds = %then0__1, %entry - ret void +then0__6: ; preds = %continue__5 + ret i64 6 + +continue__6: ; preds = %continue__5 + ret i64 0 } -define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { +define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() { entry: - %0 = srem i64 %n, 2 - %1 = icmp eq i64 %0, 1 + %d = call double @Microsoft__Quantum__Math__E__body() + %0 = call double @__quantum__qis__log__body(double %d) + %1 = fcmp one double 1.000000e+00, %0 br i1 %1, label %then0__1, label %continue__1 then0__1: ; preds = %entry - call void @__quantum__qis__k__body(%Qubit* %q) - br label %continue__1 + ret i64 1 + +continue__1: ; preds = %entry + %2 = call double @Microsoft__Quantum__Math__E__body() + %3 = call double @Microsoft__Quantum__Math__E__body() + %d__1 = fmul double %2, %3 + %4 = call double @__quantum__qis__log__body(double %d__1) + %5 = fcmp one double 2.000000e+00, %4 + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + ret i64 2 + +continue__2: ; preds = %continue__1 + %d__3 = call double @__quantum__qis__log__body(double 0.000000e+00) + %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d__3) + %7 = xor i1 %6, true + br i1 %7, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + ret i64 3 + +continue__3: ; preds = %continue__2 + %d__5 = call double @__quantum__qis__log__body(double -5.000000e+00) + %8 = call i1 @__quantum__qis__isnan__body(double %d__5) + %9 = xor i1 %8, true + br i1 %9, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + ret i64 4 + +continue__4: ; preds = %continue__3 + %d__6 = call double @__quantum__qis__nan__body() + %d__7 = call double @__quantum__qis__log__body(double %d__6) + %10 = call i1 @__quantum__qis__isnan__body(double %d__7) + %11 = xor i1 %10, true + br i1 %11, label %then0__5, label %continue__5 
-continue__1: ; preds = %then0__1, %entry - ret void +then0__5: ; preds = %continue__4 + ret i64 5 + +continue__5: ; preds = %continue__4 + %d__8 = call double @__quantum__qis__infinity__body() + %d__9 = call double @__quantum__qis__log__body(double %d__8) + %12 = call i1 @__quantum__qis__isinf__body(double %d__9) + %13 = xor i1 %12, true + br i1 %13, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + ret i64 6 + +continue__6: ; preds = %continue__5 + ret i64 0 } -define void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { +define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 - %n = load i64, i64* %2 - %3 = srem i64 %n, 2 - %4 = icmp eq i64 %3, 1 - br i1 %4, label %then0__1, label %continue__1 + %0 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00) + %1 = fcmp one double 0.000000e+00, %0 + br i1 %1, label %then0__1, label %continue__1 then0__1: ; preds = %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - call void @__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) - br label %continue__1 + ret i64 1 + +continue__1: ; preds = %entry + %2 = call double @Microsoft__Quantum__Math__PI__body() + %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00) + %4 = fcmp one double %2, %3 + br i1 %4, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + ret i64 2 + +continue__2: ; preds = %continue__1 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fdiv double %5, 2.000000e+00 + %7 = call double 
@__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00) + %8 = fcmp one double %6, %7 + br i1 %8, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + ret i64 3 + +continue__3: ; preds = %continue__2 + %9 = call double @Microsoft__Quantum__Math__PI__body() + %10 = fneg double %9 + %11 = fdiv double %10, 2.000000e+00 + %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00) + %13 = fcmp one double %11, %12 + br i1 %13, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + ret i64 4 + +continue__4: ; preds = %continue__3 + %14 = call double @Microsoft__Quantum__Math__PI__body() + %15 = fdiv double %14, 4.000000e+00 + %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00) + %17 = fcmp one double %15, %16 + br i1 %17, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + ret i64 5 + +continue__5: ; preds = %continue__4 + %18 = call double @Microsoft__Quantum__Math__PI__body() + %19 = fmul double %18, 3.000000e+00 + %20 = fdiv double %19, 4.000000e+00 + %21 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double -1.000000e+00) + %22 = fcmp one double %20, %21 + br i1 %22, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + ret i64 6 + +continue__6: ; preds = %continue__5 + %23 = call double @Microsoft__Quantum__Math__PI__body() + %24 = fneg double %23 + %25 = fmul double %24, 3.000000e+00 + %26 = fdiv double %25, 4.000000e+00 + %27 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double -1.000000e+00) + %28 = fcmp one double %26, %27 + br i1 %28, label %then0__7, label %continue__7 + +then0__7: ; preds = %continue__6 + ret i64 7 + +continue__7: ; preds = %continue__6 + %29 = call double @Microsoft__Quantum__Math__PI__body() + %30 = fneg double %29 + %31 = fdiv double %30, 4.000000e+00 + %32 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 
1.000000e+00) + %33 = fcmp one double %31, %32 + br i1 %33, label %then0__8, label %continue__8 + +then0__8: ; preds = %continue__7 + ret i64 8 + +continue__8: ; preds = %continue__7 + %34 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 0.000000e+00) + %35 = fcmp one double 0.000000e+00, %34 + br i1 %35, label %then0__9, label %continue__9 + +then0__9: ; preds = %continue__8 + ret i64 9 + +continue__9: ; preds = %continue__8 + %y__9 = call double @__quantum__qis__nan__body() + %d = call double @__quantum__qis__arctan2__body(double %y__9, double 0.000000e+00) + %36 = call i1 @__quantum__qis__isnan__body(double %d) + %37 = xor i1 %36, true + br i1 %37, label %then0__10, label %continue__10 + +then0__10: ; preds = %continue__9 + ret i64 11 + +continue__10: ; preds = %continue__9 + %x__10 = call double @__quantum__qis__nan__body() + %d__1 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double %x__10) + %38 = call i1 @__quantum__qis__isnan__body(double %d__1) + %39 = xor i1 %38, true + br i1 %39, label %then0__11, label %continue__11 + +then0__11: ; preds = %continue__10 + ret i64 12 + +continue__11: ; preds = %continue__10 + %y__11 = call double @__quantum__qis__nan__body() + %x__11 = call double @__quantum__qis__nan__body() + %d__2 = call double @__quantum__qis__arctan2__body(double %y__11, double %x__11) + %40 = call i1 @__quantum__qis__isnan__body(double %d__2) + %41 = xor i1 %40, true + br i1 %41, label %then0__12, label %continue__12 + +then0__12: ; preds = %continue__11 + ret i64 13 + +continue__12: ; preds = %continue__11 + ret i64 0 +} -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) +define i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() { +entry: + %0 = call %String* @__quantum__rt__string_create(i32 19, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @0, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_create(i32 
13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) + %2 = load i2, i2* @PauliI + %3 = call %String* @__quantum__rt__pauli_to_string(i2 %2) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %1, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i64 -1) + %5 = call i1 @__quantum__rt__string_equal(%String* %0, %String* %4) + %6 = xor i1 %5, true + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + ret i64 1 + +continue__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + %8 = load i2, i2* @PauliX + %9 = call %String* @__quantum__rt__pauli_to_string(i2 %8) + %10 = call i1 @__quantum__rt__string_equal(%String* %7, %String* %9) + %11 = xor i1 %10, true + br i1 %11, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + ret i64 2 + +continue__2: ; preds = %continue__1 + %12 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + %13 = load i2, i2* @PauliY + %14 = call %String* @__quantum__rt__pauli_to_string(i2 %13) + %15 = call i1 @__quantum__rt__string_equal(%String* %12, %String* %14) + %16 = xor i1 %15, true + br i1 %16, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + call void 
@__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) + ret i64 3 + +continue__3: ; preds = %continue__2 + %17 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + %18 = load i2, i2* @PauliZ + %19 = call %String* @__quantum__rt__pauli_to_string(i2 %18) + %20 = call i1 @__quantum__rt__string_equal(%String* %17, %String* %19) + %21 = xor i1 %20, true + br i1 %21, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) + ret i64 4 + +continue__4: ; preds = %continue__3 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) + 
call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) + ret i64 0 +} + +define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* + %1 = getelementptr { i64, i64 }, { i64, i64 }* %0, i64 0, i32 0 + %2 = getelementptr { i64, i64 }, { i64, i64 }* %0, i64 0, i32 1 + %3 = load i64, i64* %1 + %4 = load i64, i64* %2 + %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr { i64 }, { i64 }* %6, i64 0, i32 0 + store i64 %5, i64* %7 ret void } -define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { +define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 1 + %2 = load i64, i64* %1 + %3 = bitcast %Tuple* %arg-tuple to { i64 }* + %4 = getelementptr { i64 }, { i64 }* %3, i64 0, i32 0 + %5 = load i64, i64* %4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) + %7 = bitcast %Tuple* %6 to { i64, i64 }* + %8 = getelementptr { i64, i64 }, { i64, i64 }* %7, i64 0, i32 0 + %9 = getelementptr { i64, i64 }, { i64, i64 }* %7, i64 0, i32 1 + store i64 %2, i64* %8 + store i64 %5, i64* %9 + %10 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %11 = load %Callable*, %Callable** %10 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) + ret void +} + +define void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +define void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define double @Microsoft__Quantum__Intrinsic__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +declare double @__quantum__qis__arctan2__body(double, double) + +define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { +entry: + call void @__quantum__qis__k__body(%Qubit* %q) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { +entry: + call void @__quantum__qis__k__body(%Qubit* %q) + ret void +} + +define void 
@Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 - %n = load i64, i64* %2 - %3 = srem i64 %n, 2 - %4 = icmp eq i64 %3, 1 - br i1 %4, label %then0__1, label %continue__1 + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} -then0__1: ; preds = %entry +define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { +entry: call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - br label %continue__1 + ret void +} -continue__1: ; preds = %then0__1, %entry +define double @Microsoft__Quantum__Intrinsic__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +declare double @__quantum__qis__sqrt__body(double) + +define double @Microsoft__Quantum__Intrinsic__NAN__body() { +entry: + %0 = call double @__quantum__qis__nan__body() + ret double %0 +} + +declare double @__quantum__qis__nan__body() + +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void 
@__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) ret void } -declare void @__quantum__rt__qubit_release_array(%Array*) +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isnan__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isnan__body(double) + +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define double @Microsoft__Quantum__Intrinsic__Log__body(double %d) { +entry: + %0 = call double @__quantum__qis__log__body(double %d) + ret double %0 +} + +declare double @__quantum__qis__log__body(double) + +define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { +entry: + %0 = call double @__quantum__qis__infinity__body() + ret double %0 +} + +declare double @__quantum__qis__infinity__body() + +define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isinf__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isinf__body(double) + +define i1 
@Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isnegativeinfinity__body(double) + +define double @Microsoft__Quantum__Math__E__body() { +entry: + ret double 0x4005BF0A8B145769 +} + +define double @Microsoft__Quantum__Math__PI__body() { +entry: + ret double 0x400921FB54442D18 +} + +declare %String* @__quantum__rt__string_create(i32, i8*) + +declare %String* @__quantum__rt__pauli_to_string(i2) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i64) + +declare i1 @__quantum__rt__string_equal(%String*, %String*) -define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays(i64 %array__count, i64* %array, i64 %index, i64 %val, i1 %dummy) #0 { +define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays(i64 %array__count, i64* %array, i64 %index, i64 %val, i1 %compilerDecoy) #0 { entry: %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %array__count) %1 = icmp sgt i64 %array__count, 0 @@ -856,7 +1375,7 @@ copy: ; preds = %entry br label %next next: ; preds = %copy, %entry - %4 = call i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %0, i64 %index, i64 %val, i1 %dummy) + %4 = call i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %0, i64 %index, i64 %val, i1 %compilerDecoy) ret i64 %4 } diff --git a/src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs b/src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs index 600ecb7379b..9c4f2e5493f 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs +++ b/src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
namespace Microsoft.Quantum.Testing.QIR { open Microsoft.Quantum.Intrinsic; diff --git a/src/QirRuntime/test/QIR-static/qir-test-strings.cpp b/src/QirRuntime/test/QIR-static/qir-test-strings.cpp new file mode 100644 index 00000000000..7cbe6c3592e --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-strings.cpp @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#include + +#include "catch.hpp" + + +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body(); // NOLINT + + +TEST_CASE("QIR: Strings", "[qir.Str][qir.Str.PauliToString]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body()); +} + diff --git a/src/QirRuntime/test/QIR-static/qir-test-strings.qs b/src/QirRuntime/test/QIR-static/qir-test-strings.qs new file mode 100644 index 00000000000..6046ce6ab0d --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-strings.qs @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.QIR.Str { + + open Microsoft.Quantum.Intrinsic; + + function PauliToStringTest() : Int { + + if "Pauli value: PauliI" != + $"Pauli value: {PauliI}" { return 1; } // The return value indicates which test case has failed. 
+ if "PauliX" != $"{PauliX}" { return 2; } + if "PauliY" != $"{PauliY}" { return 3; } + if "PauliZ" != $"{PauliZ}" { return 4; } + + return 0; + } + +} + From 4f8aaee0e7897c1bd55386d25722049e40b2cd63 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Tue, 9 Feb 2021 12:35:49 -0800 Subject: [PATCH 02/30] QirRuntime: eliminate usage of word 'dummy' (#510) --- src/QirRuntime/lib/QIR/CMakeLists.txt | 76 +++++-------------- src/QirRuntime/lib/QIR/__dummy.cpp | 3 - .../test/FullstateSimulator/CMakeLists.txt | 6 +- .../test/unittests/QirRuntimeTests.cpp | 6 +- 4 files changed, 26 insertions(+), 65 deletions(-) delete mode 100644 src/QirRuntime/lib/QIR/__dummy.cpp diff --git a/src/QirRuntime/lib/QIR/CMakeLists.txt b/src/QirRuntime/lib/QIR/CMakeLists.txt index 37455ba9634..a90b95c7e71 100644 --- a/src/QirRuntime/lib/QIR/CMakeLists.txt +++ b/src/QirRuntime/lib/QIR/CMakeLists.txt @@ -1,25 +1,18 @@ -# -# The bridge funtions calls into the rt so on Linux the dependency requires to -# introduce a new target that sits on top of both and links them in the correct -# order. This top target doesn't have any code of its own but CMake insists on -# providing a source file, thus __dummy.cpp... -# - -set(CLANG_ARGS "-c") -if (CMAKE_BUILD_TYPE STREQUAL "Debug") -set(CLANG_ARGS - "${CLANG_ARGS}" - "-O0" - "-D_DEBUG" -) -endif() +# The downstream consumers but must pick up both the native support lib and the utility +# lib, produced from ll bridge files when linking against either qir-rt or qir-qis. 
#+++++++++++++++++++++++++++++++++++++ # qir-rt #+++++++++++++++++++++++++++++++++++++ #=============================================================================== -# Step 1: create qir-rt-support lib from the C++ sources +# create a utility lib from bridge-rt.ll +# +set(bridge_rt_target "bridge_rt_target") +compile_from_qir(bridge-rt ${bridge_rt_target}) + +#=============================================================================== +# create qir-rt-support lib from the C++ sources # set(rt_sup_source_files "allocationsTracker.cpp" @@ -40,33 +33,20 @@ add_library(qir-rt-support-obj OBJECT ${rt_sup_source_files}) target_include_directories(qir-rt-support-obj PUBLIC ${public_includes}) set_property(TARGET qir-rt-support-obj PROPERTY POSITION_INDEPENDENT_CODE ON) -#=============================================================================== -# Step 2: create a utility lib from bridge-rt.ll -# -set(bridge_rt_target "bridge_rt_target") -compile_from_qir(bridge-rt ${bridge_rt_target}) - -#=============================================================================== -# Step 3: combine the utility lib and the support lib into a single static qir-rt -# library, the clients can link against. 
-# -add_library(qir-rt STATIC __dummy.cpp) - -target_link_libraries(qir-rt PUBLIC - ${QIR_UTILITY_LIB} # set in compile_from_qir - qir-rt-support - ${CMAKE_DL_LIBS} -) -add_dependencies(qir-rt ${bridge_rt_target}) - +add_dependencies(qir-rt-support ${bridge_rt_target}) #+++++++++++++++++++++++++++++++++++++ -# QIR_QIS -# (the same dance as for qir-rt) +# qir-qis #+++++++++++++++++++++++++++++++++++++ #=============================================================================== -# Step 1: create qir-qis-support lib from the C++ sources +# create a utility lib from bridge-qis.ll +# +set(bridge_qis_target "bridge_qis_target") +compile_from_qir(bridge-qis ${bridge_qis_target}) + +#=============================================================================== +# create qir-qis-support lib from the C++ sources # set(qis_sup_source_files "intrinsics.cpp" @@ -82,25 +62,7 @@ add_library(qir-qis-support-obj OBJECT ${qis_sup_source_files}) target_include_directories(qir-qis-support-obj PUBLIC ${public_includes}) set_property(TARGET qir-qis-support-obj PROPERTY POSITION_INDEPENDENT_CODE ON) -#=============================================================================== -# Step 2: create a utility lib from bridge-qis.ll -# -set(bridge_qis_target "bridge_qis_target") -compile_from_qir(bridge-qis ${bridge_qis_target}) - -#=============================================================================== -# Step 3: combine the utility lib and the support lib into a single static qir-qis -# library, the clients can link against. 
-# -add_library(qir-qis STATIC __dummy.cpp) - -target_link_libraries(qir-qis PUBLIC - ${QIR_UTILITY_LIB} # set in compile_from_qir - qir-qis-support - ${CMAKE_DL_LIBS} -) - -add_dependencies(qir-qis ${bridge_qis_target}) +add_dependencies(qir-qis-support ${bridge_qis_target}) diff --git a/src/QirRuntime/lib/QIR/__dummy.cpp b/src/QirRuntime/lib/QIR/__dummy.cpp deleted file mode 100644 index 00665926ec8..00000000000 --- a/src/QirRuntime/lib/QIR/__dummy.cpp +++ /dev/null @@ -1,3 +0,0 @@ -/* In order to combine the bridge and native support libs into a single - static library for the clients to link against, we have to provide a - source file to use in CMake target definitions... */ \ No newline at end of file diff --git a/src/QirRuntime/test/FullstateSimulator/CMakeLists.txt b/src/QirRuntime/test/FullstateSimulator/CMakeLists.txt index b9ad5584b51..6b790d80390 100644 --- a/src/QirRuntime/test/FullstateSimulator/CMakeLists.txt +++ b/src/QirRuntime/test/FullstateSimulator/CMakeLists.txt @@ -6,8 +6,10 @@ add_executable(fullstate-simulator-tests target_link_libraries(fullstate-simulator-tests PUBLIC ${QIR_UTILITY_LIB} # created by compile_from_qir - qir-rt - qir-qis + ${QIR_BRIDGE_UTILITY_LIB} + ${QIR_BRIDGE_QIS_UTILITY_LIB} + qir-rt-support + qir-qis-support simulators ) diff --git a/src/QirRuntime/test/unittests/QirRuntimeTests.cpp b/src/QirRuntime/test/unittests/QirRuntimeTests.cpp index a16308b56dd..03ea4d536a5 100644 --- a/src/QirRuntime/test/unittests/QirRuntimeTests.cpp +++ b/src/QirRuntime/test/unittests/QirRuntimeTests.cpp @@ -912,10 +912,10 @@ TEST_CASE("Allocation tracking for tuples", "[qir_support]") CHECK_NOTHROW(ReleaseQirContext()); } -static void DummyCallableEntry(PTuple, PTuple, PTuple) {} +static void NoopCallableEntry(PTuple, PTuple, PTuple) {} TEST_CASE("Allocation tracking for callables", "[qir_support]") { - t_CallableEntry entries[4] = {DummyCallableEntry, nullptr, nullptr, nullptr}; + t_CallableEntry entries[4] = {NoopCallableEntry, nullptr, 
nullptr, nullptr}; InitializeQirContext(nullptr /*don't need a simulator*/, true /*track allocations*/); @@ -940,7 +940,7 @@ TEST_CASE("Allocation tracking for callables", "[qir_support]") TEST_CASE("Callables: copy elision", "[qir_support]") { QirContextScope qirctx(nullptr, true); - t_CallableEntry entries[4] = {DummyCallableEntry, nullptr, nullptr, nullptr}; + t_CallableEntry entries[4] = {NoopCallableEntry, nullptr, nullptr, nullptr}; QirCallable* original = quantum__rt__callable_create(entries, nullptr /*capture callbacks*/, nullptr /*capture tuple*/); From e1649573989c9b8c5bd13820c7a7f9f42c06b414 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Thu, 11 Feb 2021 14:11:00 -0800 Subject: [PATCH 03/30] Better error reporting on failure to load the fullstate simulator (#515) --- .../lib/Simulators/FullstateSimulator.cpp | 127 +++++++++++------- 1 file changed, 78 insertions(+), 49 deletions(-) diff --git a/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp b/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp index 104b1913e40..d4d016da4e0 100644 --- a/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp +++ b/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp @@ -4,12 +4,13 @@ #include #include #include +#include #include #include #include -#include "SimFactory.hpp" #include "QuantumApi_I.hpp" +#include "SimFactory.hpp" using namespace std; @@ -21,15 +22,36 @@ typedef HMODULE QUANTUM_SIMULATOR; typedef void* QUANTUM_SIMULATOR; #endif -QUANTUM_SIMULATOR LoadQuantumSimulator() +namespace { #ifdef _WIN32 - return ::LoadLibraryA("Microsoft.Quantum.Simulator.Runtime.dll"); +const char* FULLSTATESIMULATORLIB = "Microsoft.Quantum.Simulator.Runtime.dll"; #elif __APPLE__ - return ::dlopen("libMicrosoft.Quantum.Simulator.Runtime.dylib", RTLD_LAZY); +const char* FULLSTATESIMULATORLIB = "libMicrosoft.Quantum.Simulator.Runtime.dylib"; +#else +const char* FULLSTATESIMULATORLIB = 
"libMicrosoft.Quantum.Simulator.Runtime.so"; +#endif + +QUANTUM_SIMULATOR LoadQuantumSimulator() +{ + QUANTUM_SIMULATOR handle = 0; +#ifdef _WIN32 + handle = ::LoadLibraryA(FULLSTATESIMULATORLIB); + if (handle == NULL) + { + throw std::runtime_error( + std::string("Failed to load ") + FULLSTATESIMULATORLIB + + " (error code: " + std::to_string(GetLastError()) + ")"); + } #else - return ::dlopen("libMicrosoft.Quantum.Simulator.Runtime.so", RTLD_LAZY); + handle = ::dlopen(FULLSTATESIMULATORLIB, RTLD_LAZY); + if (handle == nullptr) + { + throw std::runtime_error( + std::string("Failed to load ") + FULLSTATESIMULATORLIB + " (" + ::dlerror() + ")"); + } #endif + return handle; } bool UnloadQuantumSimulator(QUANTUM_SIMULATOR handle) @@ -49,6 +71,7 @@ void* LoadProc(QUANTUM_SIMULATOR handle, const char* procName) return ::dlsym(handle, procName); #endif } +} // namespace namespace Microsoft { @@ -78,7 +101,7 @@ namespace Quantum return static_cast(pauli); } - const QUANTUM_SIMULATOR handle; + const QUANTUM_SIMULATOR handle = 0; unsigned simulatorId = -1; unsigned nextQubitId = 0; // the QuantumSimulator expects contiguous ids, starting from 0 @@ -112,25 +135,39 @@ namespace Quantum std::cout << "*********************" << std::endl; } + void* GetProc(const char* name) + { + void* proc = LoadProc(this->handle, name); + if (proc == nullptr) + { + throw std::runtime_error(std::string("Failed to find '") + name + "' proc in " + FULLSTATESIMULATORLIB); + } + return proc; + } + public: CFullstateSimulator() : handle(LoadQuantumSimulator()) { typedef unsigned (*TInit)(); - static TInit initSimulatorInstance = reinterpret_cast(LoadProc(this->handle, "init")); + static TInit initSimulatorInstance = reinterpret_cast(this->GetProc("init")); this->simulatorId = initSimulatorInstance(); } ~CFullstateSimulator() { - typedef unsigned (*TDestroy)(unsigned); - static TDestroy destroySimulatorInstance = reinterpret_cast(LoadProc(this->handle, "destroy")); - - 
destroySimulatorInstance(this->simulatorId); - - // TODO: It seems that simulator might still be doing something on background threads so attempting to - // unload it might crash. - // UnloadQuantumSimulator(this->handle); + if (this->simulatorId != -1) + { + typedef unsigned (*TDestroy)(unsigned); + static TDestroy destroySimulatorInstance = + reinterpret_cast(LoadProc(this->handle, "destroy")); + assert(destroySimulatorInstance); + destroySimulatorInstance(this->simulatorId); + + // TODO: It seems that simulator might still be doing something on background threads so attempting to + // unload it might crash. + // UnloadQuantumSimulator(this->handle); + } } IQuantumGateSet* AsQuantumGateSet() override @@ -145,7 +182,7 @@ namespace Quantum void GetState(TGetStateCallback callback) override { typedef bool (*TDump)(unsigned, TGetStateCallback); - static TDump dump = reinterpret_cast(LoadProc(this->handle, "Dump")); + static TDump dump = reinterpret_cast(this->GetProc("Dump")); dump(this->simulatorId, callback); } @@ -157,8 +194,7 @@ namespace Quantum Qubit AllocateQubit() override { typedef void (*TAllocateQubit)(unsigned, unsigned); - static TAllocateQubit allocateQubit = - reinterpret_cast(LoadProc(this->handle, "allocateQubit")); + static TAllocateQubit allocateQubit = reinterpret_cast(this->GetProc("allocateQubit")); const unsigned id = this->nextQubitId; allocateQubit(this->simulatorId, id); @@ -169,7 +205,7 @@ namespace Quantum void ReleaseQubit(Qubit q) override { typedef void (*TReleaseQubit)(unsigned, unsigned); - static TReleaseQubit releaseQubit = reinterpret_cast(LoadProc(this->handle, "release")); + static TReleaseQubit releaseQubit = reinterpret_cast(this->GetProc("release")); releaseQubit(this->simulatorId, GetQubitId(q)); } @@ -177,7 +213,7 @@ namespace Quantum Result M(Qubit q) override { typedef unsigned (*TM)(unsigned, unsigned); - static TM m = reinterpret_cast(LoadProc(this->handle, "M")); + static TM m = reinterpret_cast(this->GetProc("M")); 
return reinterpret_cast(m(this->simulatorId, GetQubitId(q))); } @@ -185,7 +221,7 @@ namespace Quantum { assert(numBases == numTargets); typedef unsigned (*TMeasure)(unsigned, unsigned, unsigned*, unsigned*); - static TMeasure m = reinterpret_cast(LoadProc(this->handle, "Measure")); + static TMeasure m = reinterpret_cast(this->GetProc("Measure")); vector ids = GetQubitIds(numTargets, targets); return reinterpret_cast( m(this->simulatorId, numBases, reinterpret_cast(bases), ids.data())); @@ -217,112 +253,106 @@ namespace Quantum void X(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "X")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("X")); op(this->simulatorId, GetQubitId(q)); } void ControlledX(long numControls, Qubit controls[], Qubit target) override { - static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCX")); + static TSingleQubitControlledGate op = reinterpret_cast(this->GetProc("MCX")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } void Y(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "Y")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("Y")); op(this->simulatorId, GetQubitId(q)); } void ControlledY(long numControls, Qubit controls[], Qubit target) override { - static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCY")); + static TSingleQubitControlledGate op = reinterpret_cast(this->GetProc("MCY")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } void Z(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "Z")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("Z")); op(this->simulatorId, GetQubitId(q)); } void ControlledZ(long numControls, Qubit controls[], Qubit target) override { - 
static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCZ")); + static TSingleQubitControlledGate op = reinterpret_cast(this->GetProc("MCZ")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } void H(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "H")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("H")); op(this->simulatorId, GetQubitId(q)); } void ControlledH(long numControls, Qubit controls[], Qubit target) override { - static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCH")); + static TSingleQubitControlledGate op = reinterpret_cast(this->GetProc("MCH")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } void S(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "S")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("S")); op(this->simulatorId, GetQubitId(q)); } void ControlledS(long numControls, Qubit controls[], Qubit target) override { - static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCS")); + static TSingleQubitControlledGate op = reinterpret_cast(this->GetProc("MCS")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } void AdjointS(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "AdjS")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("AdjS")); op(this->simulatorId, GetQubitId(q)); } void ControlledAdjointS(long numControls, Qubit controls[], Qubit target) override { static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCAdjS")); + reinterpret_cast(this->GetProc("MCAdjS")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, 
ids.data(), GetQubitId(target)); } void T(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "T")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("T")); op(this->simulatorId, GetQubitId(q)); } void ControlledT(long numControls, Qubit controls[], Qubit target) override { - static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCT")); + static TSingleQubitControlledGate op = reinterpret_cast(this->GetProc("MCT")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } void AdjointT(Qubit q) override { - static TSingleQubitGate op = reinterpret_cast(LoadProc(this->handle, "AdjT")); + static TSingleQubitGate op = reinterpret_cast(this->GetProc("AdjT")); op(this->simulatorId, GetQubitId(q)); } void ControlledAdjointT(long numControls, Qubit controls[], Qubit target) override { static TSingleQubitControlledGate op = - reinterpret_cast(LoadProc(this->handle, "MCAdjT")); + reinterpret_cast(this->GetProc("MCAdjT")); vector ids = GetQubitIds(numControls, controls); op(this->simulatorId, numControls, ids.data(), GetQubitId(target)); } @@ -330,7 +360,7 @@ namespace Quantum void R(PauliId axis, Qubit target, double theta) override { typedef unsigned (*TR)(unsigned, unsigned, double, unsigned); - static TR r = reinterpret_cast(LoadProc(this->handle, "R")); + static TR r = reinterpret_cast(this->GetProc("R")); r(this->simulatorId, GetBasis(axis), theta, GetQubitId(target)); } @@ -338,7 +368,7 @@ namespace Quantum void ControlledR(long numControls, Qubit controls[], PauliId axis, Qubit target, double theta) override { typedef unsigned (*TMCR)(unsigned, unsigned, double, unsigned, unsigned*, unsigned); - static TMCR cr = reinterpret_cast(LoadProc(this->handle, "MCR")); + static TMCR cr = reinterpret_cast(this->GetProc("MCR")); vector ids = GetQubitIds(numControls, controls); cr(this->simulatorId, GetBasis(axis), theta, numControls, 
ids.data(), GetQubitId(target)); @@ -347,7 +377,7 @@ namespace Quantum void Exp(long numTargets, PauliId paulis[], Qubit targets[], double theta) override { typedef unsigned (*TExp)(unsigned, unsigned, unsigned*, double, unsigned*); - static TExp exp = reinterpret_cast(LoadProc(this->handle, "Exp")); + static TExp exp = reinterpret_cast(this->GetProc("Exp")); vector ids = GetQubitIds(numTargets, targets); exp(this->simulatorId, numTargets, reinterpret_cast(paulis), theta, ids.data()); } @@ -361,7 +391,7 @@ namespace Quantum double theta) override { typedef unsigned (*TMCExp)(unsigned, unsigned, unsigned*, double, unsigned, unsigned*, unsigned*); - static TMCExp cexp = reinterpret_cast(LoadProc(this->handle, "MCExp")); + static TMCExp cexp = reinterpret_cast(this->GetProc("MCExp")); vector idsTargets = GetQubitIds(numTargets, targets); vector idsControls = GetQubitIds(numControls, controls); cexp( @@ -384,8 +414,7 @@ namespace Quantum const char* failureMessage) override { typedef double (*TOp)(unsigned id, unsigned n, int* b, unsigned* q); - static TOp jointEnsembleProbability = - reinterpret_cast(LoadProc(this->handle, "JointEnsembleProbability")); + static TOp jointEnsembleProbability = reinterpret_cast(this->GetProc("JointEnsembleProbability")); vector ids = GetQubitIds(numTargets, targets); double actualProbability = From 78d68dcfc7f3b6f08a5492e7a22ef11fbca658d7 Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Fri, 12 Feb 2021 11:45:37 -0800 Subject: [PATCH 04/30] Clean up target package and simulator files. (#516) This gets rid of several unused files that had mistakenly been merged from the feature branch, as well as cleaning up the implementations to match the new Q# style and removing unusing `using` statements from C#. 
--- AdvantageBenchmark/privateBuild/host.csproj | 2 +- .../releasedBuild/quantum/quantum.csproj | 2 +- .../Microsoft.Quantum.CSharpGeneration.fsproj | 2 +- ....Microsoft.Quantum.Simulators.Type2.csproj | 2 +- ....Microsoft.Quantum.Simulators.Type3.csproj | 2 +- .../QuantumSimulator/ApplyControlledX.cs | 2 - .../QuantumSimulator/ApplyControlledZ.cs | 2 - .../QuantumSimulator/ApplyUncontrolledH.cs | 2 - .../QuantumSimulator/ApplyUncontrolledRx.cs | 2 - .../QuantumSimulator/ApplyUncontrolledRy.cs | 2 - .../QuantumSimulator/ApplyUncontrolledRz.cs | 2 - .../QuantumSimulator/ApplyUncontrolledS.cs | 2 - .../QuantumSimulator/ApplyUncontrolledSWAP.cs | 2 - .../QuantumSimulator/ApplyUncontrolledT.cs | 2 - .../QuantumSimulator/ApplyUncontrolledX.cs | 2 - .../QuantumSimulator/ApplyUncontrolledY.cs | 2 - .../QuantumSimulator/ApplyUncontrolledZ.cs | 2 - .../Simulators/QuantumSimulator/Assert.cs | 3 -- .../Simulators/QuantumSimulator/AssertProb.cs | 3 -- .../Simulators/QuantumSimulator/Exp.cs | 2 - .../Simulators/QuantumSimulator/Extensions.cs | 1 - .../Simulators/QuantumSimulator/H.cs | 2 - .../Simulators/QuantumSimulator/IsingXX.cs | 2 - .../Simulators/QuantumSimulator/IsingYY.cs | 2 - .../Simulators/QuantumSimulator/IsingZZ.cs | 2 - .../Simulators/QuantumSimulator/M.cs | 4 -- .../Simulators/QuantumSimulator/Measure.cs | 1 - .../QuantumSimulator/QuantumSimulator.cs | 2 - .../Simulators/QuantumSimulator/Qubit.cs | 1 - .../QuantumSimulator/QubitManager.cs | 1 - .../Simulators/QuantumSimulator/R.cs | 2 - .../Simulators/QuantumSimulator/Random.cs | 1 - .../Simulators/QuantumSimulator/Reset.cs | 2 - .../Simulators/QuantumSimulator/Rx.cs | 2 - .../Simulators/QuantumSimulator/Ry.cs | 2 - .../Simulators/QuantumSimulator/Rz.cs | 2 - .../Simulators/QuantumSimulator/S.cs | 2 - .../Simulators/QuantumSimulator/SWAP.cs | 2 - .../Simulators/QuantumSimulator/StackTrace.cs | 3 -- .../QuantumSimulator/StateDumper.cs | 1 - .../Simulators/QuantumSimulator/T.cs | 2 - 
.../Simulators/QuantumSimulator/X.cs | 2 - .../Simulators/QuantumSimulator/Y.cs | 2 - .../Simulators/QuantumSimulator/Z.cs | 2 - .../Decompositions/ApplyControlledX.qs | 39 ------------------- .../Decompositions/ApplyControlledZ.qs | 36 ----------------- .../Decompositions/ApplyUncontrolledH.qs | 28 ------------- .../Decompositions/ApplyUncontrolledRx.qs | 36 ----------------- .../Decompositions/ApplyUncontrolledRy.qs | 36 ----------------- .../Decompositions/ApplyUncontrolledRz.qs | 36 ----------------- .../Decompositions/ApplyUncontrolledS.qs | 27 ------------- .../Decompositions/ApplyUncontrolledT.qs | 27 ------------- .../Decompositions/ApplyUncontrolledX.qs | 26 ------------- .../Decompositions/ApplyUncontrolledY.qs | 26 ------------- .../Decompositions/ApplyUncontrolledZ.qs | 26 ------------- .../AssertOperationsEqualInPlace.qs | 8 ++-- .../AssertOperationsEqualReferenced.qs | 4 +- .../TargetDefinitions/Decompositions/CCNOT.qs | 2 - .../Decompositions/CCNOTFromCCZ.qs | 2 - .../TargetDefinitions/Decompositions/CNOT.qs | 2 - .../Decompositions/ExpFrac.qs | 2 - .../Decompositions/ExpFracFromExpUtil.qs | 2 - .../Decompositions/ExpFromExpUtil.qs | 2 - .../Decompositions/ExpUtil.qs | 4 +- .../Decompositions/ExpUtilFromIsing.qs | 4 +- .../Decompositions/HFromSinglyControlled.qs | 2 - .../TargetDefinitions/Decompositions/I.qs | 2 - .../Decompositions/IsingXX.qs | 32 --------------- .../Decompositions/IsingYY.qs | 32 --------------- .../Decompositions/IsingZZ.qs | 32 --------------- .../TargetDefinitions/Decompositions/M.qs | 2 - .../Decompositions/MResetX.qs | 2 - .../Decompositions/MResetXExplicit.qs | 2 - .../Decompositions/MResetXWithNoReuse.qs | 2 - .../Decompositions/MResetY.qs | 2 - .../Decompositions/MResetYExplicit.qs | 2 - .../Decompositions/MResetYWithNoReuse.qs | 2 - .../Decompositions/MResetZ.qs | 2 - .../Decompositions/MResetZExplicit.qs | 2 - .../Decompositions/MResetZWithNoReuse.qs | 2 - .../Decompositions/Measure.qs | 6 +-- 
.../Decompositions/MeasureWithNoReuse.qs | 6 +-- .../Decompositions/PreparePostM.qs | 2 - .../Decompositions/PreparePostMNoop.qs | 2 - .../TargetDefinitions/Decompositions/R.qs | 2 - .../TargetDefinitions/Decompositions/R1.qs | 2 - .../Decompositions/R1Frac.qs | 2 - .../TargetDefinitions/Decompositions/RFrac.qs | 2 - .../TargetDefinitions/Decompositions/Reset.qs | 1 - .../Decompositions/ResetAll.qs | 4 +- .../Decompositions/ResetWithoutReuse.qs | 2 - .../TargetDefinitions/Decompositions/Rx.qs | 2 - .../Decompositions/RxFromSinglyControlled.qs | 2 - .../TargetDefinitions/Decompositions/Ry.qs | 2 - .../Decompositions/RyFromSinglyControlled.qs | 2 - .../TargetDefinitions/Decompositions/Rz.qs | 2 - .../Decompositions/RzFromSinglyControlled.qs | 2 - .../Decompositions/SFromSinglyControlled.qs | 2 - .../TargetDefinitions/Decompositions/SWAP.qs | 2 - .../SWAPFromSinglyControlled.qs | 2 - .../Decompositions/SetToBasisState.qs | 2 - .../Decompositions/TFromSinglyControlled.qs | 2 - .../TargetDefinitions/Decompositions/Utils.qs | 22 +++-------- .../Decompositions/XFromSinglyControlled.qs | 2 - .../Decompositions/YFromSinglyControlled.qs | 2 - .../Decompositions/ZFromSinglyControlled.qs | 2 - .../Interfaces/IGate_ApplyControlledX.cs | 1 - .../Interfaces/IGate_ApplyControlledZ.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledH.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledRx.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledRy.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledRz.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledS.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledSWAP.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledT.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledX.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledY.cs | 1 - .../Interfaces/IGate_ApplyUncontrolledZ.cs | 1 - .../TargetDefinitions/Interfaces/IGate_Exp.cs | 1 - .../TargetDefinitions/Interfaces/IGate_H.cs | 1 - .../Interfaces/IGate_IsingXX.cs | 1 - .../Interfaces/IGate_IsingYY.cs | 1 - 
.../Interfaces/IGate_IsingZZ.cs | 1 - .../TargetDefinitions/Interfaces/IGate_M.cs | 1 - .../Interfaces/IGate_Measure.cs | 1 - .../TargetDefinitions/Interfaces/IGate_R.cs | 1 - .../Interfaces/IGate_Reset.cs | 1 - .../TargetDefinitions/Interfaces/IGate_Rx.cs | 1 - .../TargetDefinitions/Interfaces/IGate_Ry.cs | 1 - .../TargetDefinitions/Interfaces/IGate_Rz.cs | 1 - .../TargetDefinitions/Interfaces/IGate_S.cs | 1 - .../TargetDefinitions/Interfaces/IGate_T.cs | 1 - .../TargetDefinitions/Interfaces/IGate_X.cs | 1 - .../TargetDefinitions/Interfaces/IGate_Y.cs | 1 - .../TargetDefinitions/Interfaces/IGate_Z.cs | 1 - .../Intrinsic/ApplyControlledX.qs | 2 - .../Intrinsic/ApplyControlledZ.qs | 2 - .../Intrinsic/ApplyUncontrolledH.qs | 2 - .../Intrinsic/ApplyUncontrolledRx.qs | 2 - .../Intrinsic/ApplyUncontrolledRy.qs | 2 - .../Intrinsic/ApplyUncontrolledRz.qs | 2 - .../Intrinsic/ApplyUncontrolledS.qs | 2 - .../Intrinsic/ApplyUncontrolledSWAP.qs | 2 - .../Intrinsic/ApplyUncontrolledT.qs | 2 - .../Intrinsic/ApplyUncontrolledX.qs | 2 - .../Intrinsic/ApplyUncontrolledY.qs | 2 - .../Intrinsic/ApplyUncontrolledZ.qs | 2 - .../TargetDefinitions/Intrinsic/Exp.qs | 2 - .../TargetDefinitions/Intrinsic/H.qs | 2 - .../TargetDefinitions/Intrinsic/IsingXX.qs | 2 - .../TargetDefinitions/Intrinsic/IsingYY.qs | 2 - .../TargetDefinitions/Intrinsic/IsingZZ.qs | 2 - .../TargetDefinitions/Intrinsic/M.qs | 2 - .../TargetDefinitions/Intrinsic/Measure.qs | 2 - .../TargetDefinitions/Intrinsic/R.qs | 2 - .../TargetDefinitions/Intrinsic/Reset.qs | 2 - .../TargetDefinitions/Intrinsic/Rx.qs | 2 - .../TargetDefinitions/Intrinsic/Ry.qs | 2 - .../TargetDefinitions/Intrinsic/Rz.qs | 2 - .../TargetDefinitions/Intrinsic/S.qs | 2 - .../TargetDefinitions/Intrinsic/SWAP.qs | 2 - .../TargetDefinitions/Intrinsic/T.qs | 1 - .../TargetDefinitions/Intrinsic/X.qs | 2 - .../TargetDefinitions/Intrinsic/Y.qs | 2 - .../TargetDefinitions/Intrinsic/Z.qs | 2 - .../TargetPackages/Type3.Package.props | 3 -- 
.../Microsoft.Quantum.Type3.Core.csproj | 2 +- 167 files changed, 24 insertions(+), 732 deletions(-) delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyControlledX.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyControlledZ.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledH.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRx.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRy.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRz.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledS.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledT.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledX.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledY.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledZ.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/IsingXX.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/IsingYY.qs delete mode 100644 src/Simulation/TargetDefinitions/Decompositions/IsingZZ.qs diff --git a/AdvantageBenchmark/privateBuild/host.csproj b/AdvantageBenchmark/privateBuild/host.csproj index 5fbd6838ee9..0af33ce55d9 100644 --- a/AdvantageBenchmark/privateBuild/host.csproj +++ b/AdvantageBenchmark/privateBuild/host.csproj @@ -1,4 +1,4 @@ - + diff --git a/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj b/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj index b7284bec583..e89bfdce2bb 100644 --- a/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj +++ b/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj 
b/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj index a78b19dd7c0..2dc489c798d 100644 --- a/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj +++ b/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj @@ -22,7 +22,7 @@ - + diff --git a/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj b/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj index 094f95bf64b..8b0e1a36091 100644 --- a/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj +++ b/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj b/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj index 8bd1d9a7828..d3011e0359f 100644 --- a/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj +++ b/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyControlledX.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyControlledX.cs index 8e179f2d8a7..63f155fd82e 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyControlledX.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyControlledX.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyControlledZ.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyControlledZ.cs index 1cfbeed2a0e..5fff7c75896 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyControlledZ.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyControlledZ.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledH.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledH.cs index 453fbe28339..274a2801506 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledH.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledH.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRx.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRx.cs index 56e7358db96..d20ed65cdfa 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRx.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRx.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRy.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRy.cs index 05c811dc667..b8f7b623790 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRy.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRy.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRz.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRz.cs index d6343548bca..5227619f2c1 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRz.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledRz.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledS.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledS.cs index e9e08bdb6f3..6398e5de92d 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledS.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledS.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledSWAP.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledSWAP.cs index e6d8e72382a..de99bdeec47 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledSWAP.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledSWAP.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledT.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledT.cs index 3f3e4902be5..f371a01c766 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledT.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledT.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledX.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledX.cs index 13aaf95b503..fd34d8e56ed 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledX.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledX.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledY.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledY.cs index 5e409019a40..064773160bc 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledY.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledY.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledZ.cs b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledZ.cs index 09eb9f70e9e..4e08dcda810 100644 --- a/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledZ.cs +++ b/src/Simulation/Simulators/QuantumSimulator/ApplyUncontrolledZ.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Assert.cs b/src/Simulation/Simulators/QuantumSimulator/Assert.cs index fb7d2ec5952..fc54ba1fcc1 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Assert.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Assert.cs @@ -2,9 +2,6 @@ // Licensed under the MIT License. 
using System; -using System.Diagnostics; -using System.Linq; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; using static System.Math; diff --git a/src/Simulation/Simulators/QuantumSimulator/AssertProb.cs b/src/Simulation/Simulators/QuantumSimulator/AssertProb.cs index 8cc32bb0f9e..955ba22929b 100644 --- a/src/Simulation/Simulators/QuantumSimulator/AssertProb.cs +++ b/src/Simulation/Simulators/QuantumSimulator/AssertProb.cs @@ -2,9 +2,6 @@ // Licensed under the MIT License. using System; -using System.Diagnostics; -using System.Linq; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; using static System.Math; diff --git a/src/Simulation/Simulators/QuantumSimulator/Exp.cs b/src/Simulation/Simulators/QuantumSimulator/Exp.cs index fcab47bc9e8..a9450d1c388 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Exp.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Exp.cs @@ -2,8 +2,6 @@ // Licensed under the MIT License. using System; -using System.Linq; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Extensions.cs b/src/Simulation/Simulators/QuantumSimulator/Extensions.cs index 9f073e6c6bd..2e38358e372 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Extensions.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Extensions.cs @@ -2,7 +2,6 @@ // Licensed under the MIT License. using System; -using System.Reflection; using System.Diagnostics; using System.Linq; diff --git a/src/Simulation/Simulators/QuantumSimulator/H.cs b/src/Simulation/Simulators/QuantumSimulator/H.cs index 57b9aed9169..4b8a954bdd8 100644 --- a/src/Simulation/Simulators/QuantumSimulator/H.cs +++ b/src/Simulation/Simulators/QuantumSimulator/H.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/IsingXX.cs b/src/Simulation/Simulators/QuantumSimulator/IsingXX.cs index e36cdf6c602..e3283033548 100644 --- a/src/Simulation/Simulators/QuantumSimulator/IsingXX.cs +++ b/src/Simulation/Simulators/QuantumSimulator/IsingXX.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/IsingYY.cs b/src/Simulation/Simulators/QuantumSimulator/IsingYY.cs index 924a7903981..11338a1576d 100644 --- a/src/Simulation/Simulators/QuantumSimulator/IsingYY.cs +++ b/src/Simulation/Simulators/QuantumSimulator/IsingYY.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/IsingZZ.cs b/src/Simulation/Simulators/QuantumSimulator/IsingZZ.cs index 6265db3967d..8d182817fae 100644 --- a/src/Simulation/Simulators/QuantumSimulator/IsingZZ.cs +++ b/src/Simulation/Simulators/QuantumSimulator/IsingZZ.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/M.cs b/src/Simulation/Simulators/QuantumSimulator/M.cs index 860daa5643e..51afcaf347c 100644 --- a/src/Simulation/Simulators/QuantumSimulator/M.cs +++ b/src/Simulation/Simulators/QuantumSimulator/M.cs @@ -1,10 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Diagnostics; -using System.Linq; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Measure.cs b/src/Simulation/Simulators/QuantumSimulator/Measure.cs index 56b0dfaf6fb..630d2a1fbcf 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Measure.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Measure.cs @@ -2,7 +2,6 @@ // Licensed under the MIT License. using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/QuantumSimulator.cs b/src/Simulation/Simulators/QuantumSimulator/QuantumSimulator.cs index c99bced0ba2..8b8059f22be 100644 --- a/src/Simulation/Simulators/QuantumSimulator/QuantumSimulator.cs +++ b/src/Simulation/Simulators/QuantumSimulator/QuantumSimulator.cs @@ -2,11 +2,9 @@ // Licensed under the MIT License. 
using System; -using System.Linq; using Microsoft.Quantum.Simulation.Core; using Microsoft.Quantum.Simulation.Common; using System.Runtime.InteropServices; -using System.Threading; using Microsoft.Quantum.Simulation.Simulators.Exceptions; using Microsoft.Quantum.Intrinsic.Interfaces; using System.Collections.Generic; diff --git a/src/Simulation/Simulators/QuantumSimulator/Qubit.cs b/src/Simulation/Simulators/QuantumSimulator/Qubit.cs index 010ccab7bf7..02424dfbc3e 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Qubit.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Qubit.cs @@ -5,7 +5,6 @@ using Microsoft.Quantum.Simulation.Core; using System; using System.Diagnostics; -using System.Runtime.InteropServices; namespace Microsoft.Quantum.Simulation.Simulators { diff --git a/src/Simulation/Simulators/QuantumSimulator/QubitManager.cs b/src/Simulation/Simulators/QuantumSimulator/QubitManager.cs index da2ac62b7cb..b1bcc7b09fa 100644 --- a/src/Simulation/Simulators/QuantumSimulator/QubitManager.cs +++ b/src/Simulation/Simulators/QuantumSimulator/QubitManager.cs @@ -5,7 +5,6 @@ using Microsoft.Quantum.Simulation.Core; using Microsoft.Quantum.Simulation.Simulators.Exceptions; using System.Diagnostics; -using System.Runtime.InteropServices; namespace Microsoft.Quantum.Simulation.Simulators { diff --git a/src/Simulation/Simulators/QuantumSimulator/R.cs b/src/Simulation/Simulators/QuantumSimulator/R.cs index fb3d85ab9ea..9308e6f49b5 100644 --- a/src/Simulation/Simulators/QuantumSimulator/R.cs +++ b/src/Simulation/Simulators/QuantumSimulator/R.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Random.cs b/src/Simulation/Simulators/QuantumSimulator/Random.cs index 419bf425112..073e9e2ab92 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Random.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Random.cs @@ -2,7 +2,6 @@ // Licensed under the MIT License. using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Reset.cs b/src/Simulation/Simulators/QuantumSimulator/Reset.cs index 16f0ff3e39c..7a52215b8ec 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Reset.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Reset.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Rx.cs b/src/Simulation/Simulators/QuantumSimulator/Rx.cs index ec092af18d9..547ca6e0146 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Rx.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Rx.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Ry.cs b/src/Simulation/Simulators/QuantumSimulator/Ry.cs index 81962a986de..9813e2d15b4 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Ry.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Ry.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Rz.cs b/src/Simulation/Simulators/QuantumSimulator/Rz.cs index 6784d7eb149..d3e052d88d0 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Rz.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Rz.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/S.cs b/src/Simulation/Simulators/QuantumSimulator/S.cs index c1e265bba3d..592f23a17d2 100644 --- a/src/Simulation/Simulators/QuantumSimulator/S.cs +++ b/src/Simulation/Simulators/QuantumSimulator/S.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/SWAP.cs b/src/Simulation/Simulators/QuantumSimulator/SWAP.cs index 88f967a5bc3..b3cc0597e1e 100644 --- a/src/Simulation/Simulators/QuantumSimulator/SWAP.cs +++ b/src/Simulation/Simulators/QuantumSimulator/SWAP.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/StackTrace.cs b/src/Simulation/Simulators/QuantumSimulator/StackTrace.cs index c0840d02100..63ede1e4457 100644 --- a/src/Simulation/Simulators/QuantumSimulator/StackTrace.cs +++ b/src/Simulation/Simulators/QuantumSimulator/StackTrace.cs @@ -1,13 +1,10 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using Newtonsoft.Json; using System; using System.Collections.Generic; -using System.Text; using Microsoft.Quantum.Simulation.Core; using System.Diagnostics; -using System.Linq; namespace Microsoft.Quantum.Simulation.Common { diff --git a/src/Simulation/Simulators/QuantumSimulator/StateDumper.cs b/src/Simulation/Simulators/QuantumSimulator/StateDumper.cs index 9198616d9cd..1faa7bda1a5 100644 --- a/src/Simulation/Simulators/QuantumSimulator/StateDumper.cs +++ b/src/Simulation/Simulators/QuantumSimulator/StateDumper.cs @@ -2,7 +2,6 @@ // Licensed under the MIT License. using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/T.cs b/src/Simulation/Simulators/QuantumSimulator/T.cs index c95dde4528d..8e8e5d364a1 100644 --- a/src/Simulation/Simulators/QuantumSimulator/T.cs +++ b/src/Simulation/Simulators/QuantumSimulator/T.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/X.cs b/src/Simulation/Simulators/QuantumSimulator/X.cs index c01f5b65535..3ea6d275b5e 100644 --- a/src/Simulation/Simulators/QuantumSimulator/X.cs +++ b/src/Simulation/Simulators/QuantumSimulator/X.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Y.cs b/src/Simulation/Simulators/QuantumSimulator/Y.cs index 42fda7e38e7..4469dd356ea 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Y.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Y.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/Simulators/QuantumSimulator/Z.cs b/src/Simulation/Simulators/QuantumSimulator/Z.cs index ef8a174c2a6..efe41238a64 100644 --- a/src/Simulation/Simulators/QuantumSimulator/Z.cs +++ b/src/Simulation/Simulators/QuantumSimulator/Z.cs @@ -1,8 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; -using System.Runtime.InteropServices; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Simulation.Simulators diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyControlledX.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyControlledX.qs deleted file mode 100644 index 4bf60fd83dd..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyControlledX.qs +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the controlled-X (or CNOT) gate to a pair of qubits. Note that the Controlled - /// functor is not supported. - /// - /// # Description - /// \begin{align} - /// \operatorname{CNOT} \mathrel{:=} - /// \begin{bmatrix} - /// 1 & 0 & 0 & 0 \\\\ - /// 0 & 1 & 0 & 0 \\\\ - /// 0 & 0 & 0 & 1 \\\\ - /// 0 & 0 & 1 & 0 - /// \end{bmatrix}, - /// \end{align} - /// - /// where rows and columns are ordered as in the quantum concepts guide. - /// - /// # Input - /// ## control - /// Control qubit for the CNOT gate. - /// ## target - /// Target qubit for the CNOT gate. - /// - /// # Remarks - /// Equivalent to: - /// ```qsharp - /// CNOT(control, target); - /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyControlledX") - internal operation ApplyControlledX (control : Qubit, target : Qubit) : Unit is Adj { - CNOT(control, target); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyControlledZ.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyControlledZ.qs deleted file mode 100644 index 70cb2a6d068..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyControlledZ.qs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the controlled-Z (CZ) gate to a pair of qubits. Note that the Controlled - /// functor is not supported. - /// - /// $$ - /// \begin{align} - /// 1 & 0 & 0 & 0 \\\\ - /// 0 & 1 & 0 & 0 \\\\ - /// 0 & 0 & 1 & 0 \\\\ - /// 0 & 0 & 0 & -1 - /// \end{align}, - /// $$ - /// where rows and columns are organized as in the quantum concepts guide. - /// - /// # Input - /// ## control - /// Control qubit for the CZ gate. - /// ## target - /// Target qubit for the CZ gate. - /// - /// # Remarks - /// Equivalent to: - /// ```qsharp - /// Controlled Z([control], target); - /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyControlledZ") - internal operation ApplyControlledZ (control : Qubit, target : Qubit) : Unit is Adj { - Controlled Z([control], target); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledH.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledH.qs deleted file mode 100644 index 74a4e5f61fa..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledH.qs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the Hadamard transformation to a single qubit. Note that the Controlled - /// functor is not supported. - /// - /// # Description - /// \begin{align} - /// H \mathrel{:=} - /// \frac{1}{\sqrt{2}} - /// \begin{bmatrix} - /// 1 & 1 \\\\ - /// 1 & -1 - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## qubit - /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledH") - internal operation ApplyUncontrolledH (qubit : Qubit) : Unit is Adj { - H(qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRx.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRx.qs deleted file mode 100644 index 8db8b3733a1..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRx.qs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies a rotation about the $x$-axis by a given angle. Note that the Controlled - /// functor is not supported. - /// - /// # Description - /// \begin{align} - /// R_x(\theta) \mathrel{:=} - /// e^{-i \theta \sigma_x / 2} = - /// \begin{bmatrix} - /// \cos \frac{\theta}{2} & -i\sin \frac{\theta}{2} \\\\ - /// -i\sin \frac{\theta}{2} & \cos \frac{\theta}{2} - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## theta - /// Angle about which the qubit is to be rotated. - /// ## qubit - /// Qubit to which the gate should be applied. - /// - /// # Remarks - /// Equivalent to: - /// ```qsharp - /// R(PauliX, theta, qubit); - /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledRx") - internal operation ApplyUncontrolledRx (theta : Double, qubit : Qubit) : Unit is Adj { - Rx(theta, qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRy.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRy.qs deleted file mode 100644 index bd72cde1e7e..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRy.qs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies a rotation about the $y$-axis by a given angle. Note that the Controlled - /// functor is not supported. - /// - /// # Description - /// \begin{align} - /// R_y(\theta) \mathrel{:=} - /// e^{-i \theta \sigma_y / 2} = - /// \begin{bmatrix} - /// \cos \frac{\theta}{2} & -\sin \frac{\theta}{2} \\\\ - /// \sin \frac{\theta}{2} & \cos \frac{\theta}{2} - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## theta - /// Angle about which the qubit is to be rotated. - /// ## qubit - /// Qubit to which the gate should be applied. - /// - /// # Remarks - /// Equivalent to: - /// ```qsharp - /// R(PauliY, theta, qubit); - /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledRy") - internal operation ApplyUncontrolledRy (theta : Double, qubit : Qubit) : Unit is Adj { - Ry(theta, qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRz.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRz.qs deleted file mode 100644 index 5c864aae74f..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledRz.qs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies a rotation about the $z$-axis by a given angle. Note that the Controlled - /// functor is not supported. - /// - /// # Description - /// \begin{align} - /// R_z(\theta) \mathrel{:=} - /// e^{-i \theta \sigma_z / 2} = - /// \begin{bmatrix} - /// e^{-i \theta / 2} & 0 \\\\ - /// 0 & e^{i \theta / 2} - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## theta - /// Angle about which the qubit is to be rotated. - /// ## qubit - /// Qubit to which the gate should be applied. 
- /// - /// # Remarks - /// Equivalent to: - /// ```qsharp - /// R(PauliZ, theta, qubit); - /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledRz") - internal operation ApplyUncontrolledRz (theta : Double, qubit : Qubit) : Unit is Adj { - Rz(theta, qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledS.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledS.qs deleted file mode 100644 index 1b64c5e7839..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledS.qs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the π/4 phase gate to a single qubit. Note that the Controlled functor - /// is not supported. - /// - /// # Description - /// \begin{align} - /// S \mathrel{:=} - /// \begin{bmatrix} - /// 1 & 0 \\\\ - /// 0 & i - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## qubit - /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledS") - internal operation ApplyUncontrolledS (qubit : Qubit) : Unit is Adj { - S(qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledT.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledT.qs deleted file mode 100644 index 60cf1ecd7b8..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledT.qs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the π/8 gate to a single qubit. Note that the Controlled functor is - /// not supported. 
- /// - /// # Description - /// \begin{align} - /// T \mathrel{:=} - /// \begin{bmatrix} - /// 1 & 0 \\\\ - /// 0 & e^{i \pi / 4} - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## qubit - /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledT") - internal operation ApplyUncontrolledT (qubit : Qubit) : Unit is Adj { - T(qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledX.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledX.qs deleted file mode 100644 index 08a566bc840..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledX.qs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the Pauli $X$ gate. Note that the Controlled functor is not supported. - /// - /// # Description - /// \begin{align} - /// \sigma_x \mathrel{:=} - /// \begin{bmatrix} - /// 0 & 1 \\\\ - /// 1 & 0 - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## qubit - /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledX") - internal operation ApplyUncontrolledX (qubit : Qubit) : Unit is Adj { - X(qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledY.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledY.qs deleted file mode 100644 index bee6d1fb7b3..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledY.qs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the Pauli $Y$ gate. Note that the Controlled functor is not supported. - /// - /// # Description - /// \begin{align} - /// \sigma_y \mathrel{:=} - /// \begin{bmatrix} - /// 0 & -i \\\\ - /// i & 0 - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## qubit - /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledY") - internal operation ApplyUncontrolledY (qubit : Qubit) : Unit is Adj { - Y(qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledZ.qs b/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledZ.qs deleted file mode 100644 index ffb10e7e364..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/ApplyUncontrolledZ.qs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the Pauli $Z$ gate. Note that the Controlled functor is not supported. - /// - /// # Description - /// \begin{align} - /// \sigma_z \mathrel{:=} - /// \begin{bmatrix} - /// 1 & 0 \\\\ - /// 0 & -1 - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## qubit - /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledZ") - internal operation ApplyUncontrolledZ (qubit : Qubit) : Unit is Adj { - Z(qubit); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualInPlace.qs b/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualInPlace.qs index 427e3c50451..b6db789bc43 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualInPlace.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualInPlace.qs @@ -12,7 +12,7 @@ namespace Microsoft.Quantum.Diagnostics { internal operation IterateThroughCartesianPower (length : Int, value : Int, op : (Int[] => Unit)) : Unit { mutable bounds = new Int[length]; - for (i in 0 .. length - 1) + for i in 0 .. length - 1 { set bounds = bounds w/ i <- value; } @@ -33,7 +33,7 @@ namespace Microsoft.Quantum.Diagnostics { //computes the next element in the Cartesian product set arr = arr w/ 0 <- arr[0] + 1; - for (i in 0 .. Length(arr) - 2) + for i in 0 .. Length(arr) - 2 { if (arr[i] == bounds[i]) { @@ -73,7 +73,7 @@ namespace Microsoft.Quantum.Diagnostics { fail "qubits and stateIds must have the same length"; } - for (i in 0 .. Length(qubits) - 1) + for i in 0 .. 
Length(qubits) - 1 { let id = basis[i]; let qubit = qubits[i]; @@ -120,7 +120,7 @@ namespace Microsoft.Quantum.Diagnostics { internal operation AssertEqualOnBasisVector (basis : Int[], givenU : (Qubit[] => Unit), expectedU : (Qubit[] => Unit is Adj)) : Unit { let tolerance = 1e-5; - using (qubits = Qubit[Length(basis)]) { + use qubits = Qubit[Length(basis)] { AssertAllZeroWithinTolerance(qubits, tolerance); FlipToBasis(basis, qubits); givenU(qubits); diff --git a/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualReferenced.qs b/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualReferenced.qs index eed629789a1..8766380257a 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualReferenced.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/AssertOperationsEqualReferenced.qs @@ -20,7 +20,7 @@ namespace Microsoft.Quantum.Diagnostics { internal operation PrepareEntangledState (left : Qubit[], right : Qubit[]) : Unit is Adj + Ctl { - for (idxQubit in 0 .. Length(left) - 1) + for idxQubit in 0 .. Length(left) - 1 { H(left[idxQubit]); Controlled X([left[idxQubit]], right[idxQubit]); @@ -55,7 +55,7 @@ namespace Microsoft.Quantum.Diagnostics { /// Operation defining the expected behavior for the operation under test. operation AssertOperationsEqualReferenced (nQubits : Int, actual : (Qubit[] => Unit), expected : (Qubit[] => Unit is Adj)) : Unit { // Prepare a reference register entangled with the target register. 
- using ((reference, target) = (Qubit[nQubits], Qubit[nQubits])) { + use (reference, target) = (Qubit[nQubits], Qubit[nQubits]) { PrepareEntangledState(reference, target); actual(target); Adjoint expected(target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/CCNOT.qs b/src/Simulation/TargetDefinitions/Decompositions/CCNOT.qs index 99772e845a0..e845e6cc422 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/CCNOT.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/CCNOT.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the doubly controlled–NOT (CCNOT) gate to three qubits. @@ -20,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// Controlled X([control1, control2], target); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.CCNOT") operation CCNOT (control1 : Qubit, control2 : Qubit, target : Qubit) : Unit is Adj + Ctl { body (...) { Controlled X([control1, control2], target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/CCNOTFromCCZ.qs b/src/Simulation/TargetDefinitions/Decompositions/CCNOTFromCCZ.qs index 5b1601c799b..fcc55c6aaa6 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/CCNOTFromCCZ.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/CCNOTFromCCZ.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the doubly controlled–NOT (CCNOT) gate to three qubits. @@ -20,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// Controlled X([control1, control2], target); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.CCNOT") operation CCNOT (control1 : Qubit, control2 : Qubit, target : Qubit) : Unit is Adj + Ctl { body (...) 
{ // [Page 15 of arXiv:1206.0758v3](https://arxiv.org/pdf/1206.0758v3.pdf#page=15) diff --git a/src/Simulation/TargetDefinitions/Decompositions/CNOT.qs b/src/Simulation/TargetDefinitions/Decompositions/CNOT.qs index 7817f4fd480..ead30a51ac0 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/CNOT.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/CNOT.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the controlled-NOT (CNOT) gate to a pair of qubits. @@ -31,7 +30,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// Controlled X([control], target); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.CNOT") operation CNOT (control : Qubit, target : Qubit) : Unit is Adj + Ctl { body (...) { Controlled X([control], target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/ExpFrac.qs b/src/Simulation/TargetDefinitions/Decompositions/ExpFrac.qs index 5a0bdeeb3f1..e891e67c7f3 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ExpFrac.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ExpFrac.qs @@ -4,7 +4,6 @@ namespace Microsoft.Quantum.Intrinsic { open Microsoft.Quantum.Math; open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the exponential of a multi-qubit Pauli operator @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// the qubit register is to be rotated. /// ## qubits /// Register to apply the given rotation to. - @EnableTestingViaName("Test.TargetDefinitions.ExpFrac") operation ExpFrac (paulis : Pauli[], numerator : Int, power : Int, qubits : Qubit[]) : Unit is Adj + Ctl { // Note that power must be converted to a double and used with 2.0 instead of 2 to allow for // negative exponents that result in a fractional denominator. 
diff --git a/src/Simulation/TargetDefinitions/Decompositions/ExpFracFromExpUtil.qs b/src/Simulation/TargetDefinitions/Decompositions/ExpFracFromExpUtil.qs index 226630ffb79..10f986aac12 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ExpFracFromExpUtil.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ExpFracFromExpUtil.qs @@ -4,7 +4,6 @@ namespace Microsoft.Quantum.Intrinsic { open Microsoft.Quantum.Math; open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the exponential of a multi-qubit Pauli operator @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// the qubit register is to be rotated. /// ## qubits /// Register to apply the given rotation to. - @EnableTestingViaName("Test.TargetDefinitions.ExpFrac") operation ExpFrac (paulis : Pauli[], numerator : Int, power : Int, qubits : Qubit[]) : Unit is Adj + Ctl { body (...) { if (Length(paulis) != Length(qubits)) { fail "Arrays 'pauli' and 'target' must have the same length"; } diff --git a/src/Simulation/TargetDefinitions/Decompositions/ExpFromExpUtil.qs b/src/Simulation/TargetDefinitions/Decompositions/ExpFromExpUtil.qs index 6bd54f9a036..74118c24c5d 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ExpFromExpUtil.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ExpFromExpUtil.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the exponential of a multi-qubit Pauli operator. @@ -23,7 +22,6 @@ namespace Microsoft.Quantum.Intrinsic { /// target register is to be rotated. /// ## qubits /// Register to apply the given rotation to. - @EnableTestingViaName("Test.TargetDefinitions.Exp") operation Exp (paulis : Pauli[], theta : Double, qubits : Qubit[]) : Unit is Adj + Ctl { body (...) 
{ if (Length(paulis) != Length(qubits)) { fail "Arrays 'pauli' and 'qubits' must have the same length"; } diff --git a/src/Simulation/TargetDefinitions/Decompositions/ExpUtil.qs b/src/Simulation/TargetDefinitions/Decompositions/ExpUtil.qs index 5131ac27048..2b25fdb2ea4 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ExpUtil.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ExpUtil.qs @@ -2,9 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - @EnableTestingViaName("Test.TargetDefinitions.ExpUtil") internal operation ExpUtil (paulis : Pauli[], theta : Double, qubits : Qubit[], rotation : ((Pauli, Qubit) => Unit is Adj + Ctl)) : Unit is Ctl { if (Length(paulis) != Length(qubits)) { fail "Arrays 'paulis' and 'qubits' must have the same length"; } if (Length(paulis) == 1) { @@ -12,7 +10,7 @@ namespace Microsoft.Quantum.Intrinsic { } else { // Length(paulis) > 1 within { - for (i in 0 .. Length(paulis) - 1) { + for i in 0 .. Length(paulis) - 1 { MapPauli(qubits[i], PauliZ, paulis[i]); } } diff --git a/src/Simulation/TargetDefinitions/Decompositions/ExpUtilFromIsing.qs b/src/Simulation/TargetDefinitions/Decompositions/ExpUtilFromIsing.qs index b1bbc9d0dc4..ebc50d9aca4 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ExpUtilFromIsing.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ExpUtilFromIsing.qs @@ -2,9 +2,7 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - @EnableTestingViaName("Test.TargetDefinitions.ExpUtil") internal operation ExpUtil (paulis : Pauli[], theta : Double, qubits : Qubit[], rotation : ((Pauli, Qubit) => Unit is Adj + Ctl)) : Unit is Ctl { if (Length(paulis) != Length(qubits)) { fail "Arrays 'paulis' and 'qubits' must have the same length"; } if (Length(paulis) == 1) { @@ -28,7 +26,7 @@ namespace Microsoft.Quantum.Intrinsic { } else { // Length(paulis) > 2 within { - for (i in 0 .. Length(paulis) - 1) { + for i in 0 .. Length(paulis) - 1 { MapPauli(qubits[i], PauliZ, paulis[i]); } } diff --git a/src/Simulation/TargetDefinitions/Decompositions/HFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/HFromSinglyControlled.qs index ff638cc18d9..f1c18932cc9 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/HFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/HFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Hadamard transformation to a single qubit. @@ -20,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.H") operation H (qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledH(qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/I.qs b/src/Simulation/TargetDefinitions/Decompositions/I.qs index 39ddebc3797..debdfc92c68 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/I.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/I.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Performs the identity operation (no-op) on a single qubit. 
@@ -10,7 +9,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Remarks /// This is a no-op. It is provided for completeness and because /// sometimes it is useful to call the identity in an algorithm or to pass it as a parameter. - @EnableTestingViaName("Test.TargetDefinitions.I") operation I (target : Qubit) : Unit is Adj + Ctl { body (...) { } adjoint self; diff --git a/src/Simulation/TargetDefinitions/Decompositions/IsingXX.qs b/src/Simulation/TargetDefinitions/Decompositions/IsingXX.qs deleted file mode 100644 index 2c4b93c4ad1..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/IsingXX.qs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the two qubit Ising $XX$ rotation gate. - /// - /// # Description - /// \begin{align} - /// XX(\theta) \mathrel{:=} - /// \begin{bmatrix} - /// \cos \theta & 0 & 0 & -i\sin \theta \\\\ - /// 0 & \cos \theta & -i\sin \theta & 0 \\\\ - /// 0 & -i\sin \theta & \cos \theta & 0 \\\\ - /// -i\sin \theta & 0 & 0 & \cos \theta - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## theta - /// The angle about which the qubits are rotated. - /// ## qubit0 - /// The first qubit input to the gate. - /// ## qubit1 - /// The second qubit input to the gate. - @EnableTestingViaName("Test.TargetDefinitions.IsingXX") - internal operation IsingXX (theta : Double, qubit0 : Qubit, qubit1 : Qubit) : Unit is Adj + Ctl { - Exp([PauliX, PauliX], theta * 2.0, [qubit0, qubit1]); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/IsingYY.qs b/src/Simulation/TargetDefinitions/Decompositions/IsingYY.qs deleted file mode 100644 index 51d3a76c929..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/IsingYY.qs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. 
All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the two qubit Ising $YY$ rotation gate. - /// - /// # Description - /// \begin{align} - /// YY(\theta) \mathrel{:=} - /// \begin{bmatrix} - /// \cos \theta & 0 & 0 & i\sin \theta \\\\ - /// 0 & \cos \theta & -i\sin \theta & 0 \\\\ - /// 0 & -i\sin \theta & \cos \theta & 0 \\\\ - /// i\sin \theta & 0 & 0 & \cos \theta - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## theta - /// The angle about which the qubits are rotated. - /// ## qubit0 - /// The first qubit input to the gate. - /// ## qubit1 - /// The second qubit input to the gate. - @EnableTestingViaName("Test.TargetDefinitions.IsingYY") - internal operation IsingYY (theta : Double, qubit0 : Qubit, qubit1 : Qubit) : Unit is Adj + Ctl { - Exp([PauliY, PauliY], theta * 2.0, [qubit0, qubit1]); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/IsingZZ.qs b/src/Simulation/TargetDefinitions/Decompositions/IsingZZ.qs deleted file mode 100644 index 16649bcffd6..00000000000 --- a/src/Simulation/TargetDefinitions/Decompositions/IsingZZ.qs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - - /// # Summary - /// Applies the two qubit Ising $ZZ$ rotation gate. - /// - /// # Description - /// \begin{align} - /// ZZ(\theta) \mathrel{:=} - /// \begin{bmatrix} - /// e^{-i \theta / 2} & 0 & 0 & 0 \\\\ - /// 0 & e^{-i \theta / 2} & 0 & 0 \\\\ - /// 0 & 0 & e^{-i \theta / 2} & 0 \\\\ - /// 0 & 0 & 0 & e^{i \theta / 2} - /// \end{bmatrix}. - /// \end{align} - /// - /// # Input - /// ## theta - /// The angle about which the qubits are rotated. - /// ## qubit0 - /// The first qubit input to the gate. 
- /// ## qubit1 - /// The second qubit input to the gate. - @EnableTestingViaName("Test.TargetDefinitions.IsingZZ") - internal operation IsingZZ (theta : Double, qubit0 : Qubit, qubit1 : Qubit) : Unit is Adj + Ctl { - Exp([PauliZ, PauliZ], theta * 2.0, [qubit0, qubit1]); - } -} \ No newline at end of file diff --git a/src/Simulation/TargetDefinitions/Decompositions/M.qs b/src/Simulation/TargetDefinitions/Decompositions/M.qs index a8fb33939a2..77f43700d34 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/M.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/M.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Performs a measurement of a single qubit in the @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// Measure([PauliZ], [qubit]); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.M") operation M (qubit : Qubit) : Result { return Measure([PauliZ], [qubit]); } diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetX.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetX.qs index 183fbbfd0e3..13a09519900 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetX.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetX.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; open Microsoft.Quantum.Targeting; /// # Summary @@ -26,7 +25,6 @@ namespace Microsoft.Quantum.Measurement { "BasicQuantumFunctionality", "MResetX is replaced by a supported implementation on all execution targets." 
) - @EnableTestingViaName("Test.TargetDefinitions.MResetX") operation MResetX (target : Qubit) : Result { let result = Measure([PauliX], [target]); diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetXExplicit.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetXExplicit.qs index d853dc9e8e5..0254cd0fc6e 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetXExplicit.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetXExplicit.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Measures a single qubit in the X basis, @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Measurement { /// /// # Output /// The result of measuring `target` in the Pauli $X$ basis. - @EnableTestingViaName("Test.TargetDefinitions.MResetX") operation MResetX (target : Qubit) : Result { MapPauli(target, PauliZ, PauliX); let result = M(target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetXWithNoReuse.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetXWithNoReuse.qs index 0db2613018d..b83ed66273b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetXWithNoReuse.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetXWithNoReuse.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Measures a single qubit in the X basis, @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Measurement { /// /// # Output /// The result of measuring `target` in the Pauli $X$ basis. - @EnableTestingViaName("Test.TargetDefinitions.MResetX") operation MResetX (target : Qubit) : Result { // Because the qubit cannot be reused after measurement, no actual // reset is required. 
diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetY.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetY.qs index 290b1fb8352..80beea433fb 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetY.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetY.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; open Microsoft.Quantum.Targeting; /// # Summary @@ -26,7 +25,6 @@ namespace Microsoft.Quantum.Measurement { "BasicQuantumFunctionality", "MResetY is replaced by a supported implementation on all execution targets." ) - @EnableTestingViaName("Test.TargetDefinitions.MResetY") operation MResetY (target : Qubit) : Result { let result = Measure([PauliY], [target]); diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetYExplicit.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetYExplicit.qs index 1bb586cfb8e..940b959b726 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetYExplicit.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetYExplicit.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Measures a single qubit in the Y basis, @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Measurement { /// /// # Output /// The result of measuring `target` in the Pauli $Y$ basis. 
- @EnableTestingViaName("Test.TargetDefinitions.MResetY") operation MResetY (target : Qubit) : Result { MapPauli(target, PauliZ, PauliY); let result = M(target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetYWithNoReuse.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetYWithNoReuse.qs index a2337edb403..20a68832277 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetYWithNoReuse.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetYWithNoReuse.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Measures a single qubit in the Y basis, @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Measurement { /// /// # Output /// The result of measuring `target` in the Pauli $Y$ basis. - @EnableTestingViaName("Test.TargetDefinitions.MResetY") operation MResetY (target : Qubit) : Result { // Because the qubit cannot be reused after measurement, no actual // reset is required. diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetZ.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetZ.qs index 8bb4a8849ec..34db679ded6 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetZ.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetZ.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; open Microsoft.Quantum.Targeting; /// # Summary @@ -26,7 +25,6 @@ namespace Microsoft.Quantum.Measurement { "BasicQuantumFunctionality", "MResetZ is replaced by a supported implementation on all execution targets." 
) - @EnableTestingViaName("Test.TargetDefinitions.MResetZ") operation MResetZ (target : Qubit) : Result { let result = M(target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetZExplicit.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetZExplicit.qs index 53dac30786f..00fafd690db 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetZExplicit.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetZExplicit.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Measures a single qubit in the Z basis, @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Measurement { /// /// # Output /// The result of measuring `target` in the Pauli $Z$ basis. - @EnableTestingViaName("Test.TargetDefinitions.MResetZ") operation MResetZ (target : Qubit) : Result { let result = M(target); Reset(target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/MResetZWithNoReuse.qs b/src/Simulation/TargetDefinitions/Decompositions/MResetZWithNoReuse.qs index a18daeb426b..2f0b3a56e24 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MResetZWithNoReuse.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MResetZWithNoReuse.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Measures a single qubit in the Z basis, @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Measurement { /// /// # Output /// The result of measuring `target` in the Pauli $Z$ basis. - @EnableTestingViaName("Test.TargetDefinitions.MResetZ") operation MResetZ (target : Qubit) : Result { // Because the qubit cannot be reused after measurement, no actual // reset is required. 
diff --git a/src/Simulation/TargetDefinitions/Decompositions/Measure.qs b/src/Simulation/TargetDefinitions/Decompositions/Measure.qs index 06f83119c47..bee6db6510b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/Measure.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/Measure.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Performs a joint measurement of one or more qubits in the @@ -39,7 +38,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Remarks /// If the basis array and qubit array are different lengths, then the /// operation will fail. - @EnableTestingViaName("Test.TargetDefinitions.Measure") operation Measure (bases : Pauli[], qubits : Qubit[]) : Result { if (Length(bases) != Length(qubits)) { fail "Arrays 'bases' and 'qubits' must be of the same length."; } mutable res = One; @@ -53,9 +51,9 @@ namespace Microsoft.Quantum.Intrinsic { } } else { - using( q = Qubit() ) { + use q = Qubit() { H(q); - for( k in 0 .. Length(bases) - 1 ) { + for k in 0 .. Length(bases) - 1 { if( bases[k] == PauliX ) { Controlled X ([qubits[k]], q); } if( bases[k] == PauliZ ) { Controlled Z ([qubits[k]], q); } if( bases[k] == PauliY ) { Controlled Y ([qubits[k]], q); } diff --git a/src/Simulation/TargetDefinitions/Decompositions/MeasureWithNoReuse.qs b/src/Simulation/TargetDefinitions/Decompositions/MeasureWithNoReuse.qs index 1bac07a8550..19d2f6c1c53 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/MeasureWithNoReuse.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/MeasureWithNoReuse.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Performs a joint measurement of one or more qubits in the @@ -39,7 +38,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Remarks /// If the basis array and qubit array are different lengths, then the /// operation will fail. - @EnableTestingViaName("Test.TargetDefinitions.Measure") operation Measure (bases : Pauli[], qubits : Qubit[]) : Result { if (Length(bases) != Length(qubits)) { fail "Arrays 'bases' and 'qubits' must be of the same length."; } if (Length(bases) == 1) { @@ -49,12 +47,12 @@ namespace Microsoft.Quantum.Intrinsic { return M(qubits[0]); } else { - using (q = Qubit()) { + use q = Qubit() { within { H(q); } apply { - for (k in 0 .. Length(bases) - 1) { + for k in 0 .. Length(bases) - 1 { if (bases[k] == PauliX) { Controlled X([qubits[k]], q); } if (bases[k] == PauliZ) { Controlled Z([qubits[k]], q); } if (bases[k] == PauliY) { Controlled Y([qubits[k]], q); } diff --git a/src/Simulation/TargetDefinitions/Decompositions/PreparePostM.qs b/src/Simulation/TargetDefinitions/Decompositions/PreparePostM.qs index 680d9d74faf..bfdd2c314b1 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/PreparePostM.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/PreparePostM.qs @@ -2,9 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - @EnableTestingViaName("Test.TargetDefinitions.PreparePostM") internal operation PreparePostM(result : Result, qubit : Qubit) : Unit { // This platform requires reset after measurement, and then must // re-prepare the measured state in the qubit. 
diff --git a/src/Simulation/TargetDefinitions/Decompositions/PreparePostMNoop.qs b/src/Simulation/TargetDefinitions/Decompositions/PreparePostMNoop.qs index fb5dc304282..dfd4afd5cc1 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/PreparePostMNoop.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/PreparePostMNoop.qs @@ -2,9 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - @EnableTestingViaName("Test.TargetDefinitions.PreparePostM") internal operation PreparePostM(result : Result, qubit : Qubit) : Unit { // This platform does not require any post-measurement reset, so // no additional work is needed. diff --git a/src/Simulation/TargetDefinitions/Decompositions/R.qs b/src/Simulation/TargetDefinitions/Decompositions/R.qs index bd3f376d6d4..6a8572b92d2 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/R.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/R.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the given Pauli axis. @@ -26,7 +25,6 @@ namespace Microsoft.Quantum.Intrinsic { /// When called with `pauli = PauliI`, this operation applies /// a *global phase*. This phase can be significant /// when used with the `Controlled` functor. - @EnableTestingViaName("Test.TargetDefinitions.R") operation R (pauli : Pauli, theta : Double, qubit : Qubit) : Unit is Adj + Ctl { if (pauli == PauliX) { Rx(theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/R1.qs b/src/Simulation/TargetDefinitions/Decompositions/R1.qs index 02ed51439bd..87c88d9f4dc 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/R1.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/R1.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $\ket{1}$ state by a given angle. @@ -25,7 +24,6 @@ namespace Microsoft.Quantum.Intrinsic { /// R(PauliZ, theta, qubit); /// R(PauliI, -theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.R1") operation R1 (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { R(PauliZ, theta, qubit); R(PauliI, -theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/R1Frac.qs b/src/Simulation/TargetDefinitions/Decompositions/R1Frac.qs index 056e0ba9e54..8cd29fb0fba 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/R1Frac.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/R1Frac.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $\ket{1}$ state by an angle specified @@ -35,7 +34,6 @@ namespace Microsoft.Quantum.Intrinsic { /// RFrac(PauliZ, -numerator, denominator + 1, qubit); /// RFrac(PauliI, numerator, denominator + 1, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.R1Frac") operation R1Frac (numerator : Int, power : Int, qubit : Qubit) : Unit is Adj + Ctl { RFrac(PauliZ, -numerator, power + 1, qubit); RFrac(PauliI, numerator, power + 1, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/RFrac.qs b/src/Simulation/TargetDefinitions/Decompositions/RFrac.qs index 2c1e1b7be37..e99a14bf512 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/RFrac.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/RFrac.qs @@ -4,7 +4,6 @@ namespace Microsoft.Quantum.Intrinsic { open Microsoft.Quantum.Math; open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the given Pauli axis by an angle specified @@ -39,7 +38,6 @@ namespace Microsoft.Quantum.Intrinsic { /// // PI() is a Q# function that 
returns an approximation of π. /// R(pauli, -PI() * IntAsDouble(numerator) / IntAsDouble(2 ^ (power - 1)), qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.RFrac") operation RFrac (pauli : Pauli, numerator : Int, power : Int, qubit : Qubit) : Unit is Adj + Ctl { // Note that power must be converted to a double and used with 2.0 instead of 2 to allow for // negative exponents that result in a fractional denominator. diff --git a/src/Simulation/TargetDefinitions/Decompositions/Reset.qs b/src/Simulation/TargetDefinitions/Decompositions/Reset.qs index 651ba46e2fa..b6a06a5761b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/Reset.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/Reset.qs @@ -16,7 +16,6 @@ namespace Microsoft.Quantum.Intrinsic { "BasicQuantumFunctionality", "Reset is replaced by a supported implementation on all execution targets." ) - @EnableTestingViaName("Test.TargetDefinitions.Reset") operation Reset (qubit : Qubit) : Unit { if (M(qubit) == One) { X(qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/ResetAll.qs b/src/Simulation/TargetDefinitions/Decompositions/ResetAll.qs index 6a520c85bb7..9daa8d3c1d5 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ResetAll.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ResetAll.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Given an array of qubits, measure them and ensure they are in the |0⟩ state @@ -11,9 +10,8 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubits /// An array of qubits whose states are to be reset to $\ket{0}$. 
- @EnableTestingViaName("Test.TargetDefinitions.ResetAll") operation ResetAll (qubits : Qubit[]) : Unit { - for (qubit in qubits) { + for qubit in qubits { Reset(qubit); } } diff --git a/src/Simulation/TargetDefinitions/Decompositions/ResetWithoutReuse.qs b/src/Simulation/TargetDefinitions/Decompositions/ResetWithoutReuse.qs index ef28b037cf2..f301194b99b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ResetWithoutReuse.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ResetWithoutReuse.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Given a single qubit, measures it and ensures it is in the |0⟩ state @@ -11,7 +10,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// The qubit whose state is to be reset to $\ket{0}$. - @EnableTestingViaName("Test.TargetDefinitions.Reset") operation Reset (qubit : Qubit) : Unit { // This platform doesn't support use of a qubit after measurement, so // `Reset` is really just marking the qubit as measured. diff --git a/src/Simulation/TargetDefinitions/Decompositions/Rx.qs b/src/Simulation/TargetDefinitions/Decompositions/Rx.qs index e7cf9c8be1c..7c13b9d3715 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/Rx.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/Rx.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $x$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliX, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Rx") operation Rx (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body (...) 
{ R(PauliX, theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/RxFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/RxFromSinglyControlled.qs index e964eb00297..bcf07a180dd 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/RxFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/RxFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # SummaRx /// Applies a rotation about the $x$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliX, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Rx") operation Rx (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledRx(theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/Ry.qs b/src/Simulation/TargetDefinitions/Decompositions/Ry.qs index ac1cb18dcfa..06c3c0bfa92 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/Ry.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/Ry.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $y$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliY, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Ry") operation Ry (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body (...) 
{ R(PauliY, theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/RyFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/RyFromSinglyControlled.qs index 5b6d46032b0..93b0f404e12 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/RyFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/RyFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $y$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliY, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Ry") operation Ry (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledRy(theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/Rz.qs b/src/Simulation/TargetDefinitions/Decompositions/Rz.qs index 08d0e2b239b..8aadfb3859b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/Rz.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/Rz.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $z$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliZ, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Rz") operation Rz (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body (...) 
{ R(PauliZ, theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/RzFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/RzFromSinglyControlled.qs index 09bfa6aec51..f8901ea1d6b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/RzFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/RzFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $z$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliZ, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Rz") operation Rz (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledRz(theta, qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/SFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/SFromSinglyControlled.qs index 09655566e04..a3522892acf 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/SFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/SFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the π/4 phase gate to a single qubit. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.S") operation S (qubit : Qubit) : Unit is Adj + Ctl { body (...) 
{ ApplyUncontrolledS(qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/SWAP.qs b/src/Simulation/TargetDefinitions/Decompositions/SWAP.qs index 5b987ea6b74..ee2c95aae1b 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/SWAP.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/SWAP.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the SWAP gate to a pair of qubits. @@ -33,7 +32,6 @@ namespace Microsoft.Quantum.Intrinsic { /// CNOT(qubit2, qubit1); /// CNOT(qubit1, qubit2); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.SWAP") operation SWAP (qubit1 : Qubit, qubit2 : Qubit) : Unit is Adj + Ctl { body (...) { within { diff --git a/src/Simulation/TargetDefinitions/Decompositions/SWAPFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/SWAPFromSinglyControlled.qs index 90eaf3fba0a..3beb32ccbd7 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/SWAPFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/SWAPFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the SWAP gate to a pair of qubits. @@ -33,7 +32,6 @@ namespace Microsoft.Quantum.Intrinsic { /// CNOT(qubit2, qubit1); /// CNOT(qubit1, qubit2); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.SWAP") operation SWAP (qubit1 : Qubit, qubit2 : Qubit) : Unit is Adj + Ctl { body (...) 
{ ApplyUncontrolledSWAP(qubit1, qubit2); diff --git a/src/Simulation/TargetDefinitions/Decompositions/SetToBasisState.qs b/src/Simulation/TargetDefinitions/Decompositions/SetToBasisState.qs index 740925d833c..ff7ec846edf 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/SetToBasisState.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/SetToBasisState.qs @@ -3,7 +3,6 @@ namespace Microsoft.Quantum.Measurement { open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Diagnostics; /// # Summary /// Sets a qubit to a given computational basis state by measuring the @@ -18,7 +17,6 @@ namespace Microsoft.Quantum.Measurement { /// # Remarks /// As an invariant of this operation, calling `M(q)` immediately /// after `SetToBasisState(result, q)` will return `result`. - @EnableTestingViaName("Test.TargetDefinitions.SetToBasisState") operation SetToBasisState(desired : Result, target : Qubit) : Unit { if (desired != M(target)) { X(target); diff --git a/src/Simulation/TargetDefinitions/Decompositions/TFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/TFromSinglyControlled.qs index 222ef97d9e1..e327305d5ae 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/TFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/TFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the π/8 gate to a single qubit. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.T") operation T (qubit : Qubit) : Unit is Adj + Ctl { body (...) 
{ ApplyUncontrolledT(qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/Utils.qs b/src/Simulation/TargetDefinitions/Decompositions/Utils.qs index d2808323a0d..32c31472a42 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/Utils.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/Utils.qs @@ -2,9 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; - @EnableTestingViaName("Test.TargetDefinitions.SpreadZ") internal operation SpreadZ (from : Qubit, to : Qubit[]) : Unit is Adj { if (Length(to) > 0) { CNOT(to[0], from); @@ -16,7 +14,6 @@ namespace Microsoft.Quantum.Intrinsic { } } - @EnableTestingViaName("Test.TargetDefinitions.ApplyGlobalPhase") internal operation ApplyGlobalPhase (theta : Double) : Unit is Ctl + Adj { body (...) {} controlled (controls, (...)) { @@ -30,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { } } - @EnableTestingViaName("Test.TargetDefinitions.ApplyGlobalPhaseFracWithR1Frac") internal operation ApplyGlobalPhaseFracWithR1Frac (numerator : Int, power : Int) : Unit is Adj + Ctl { body (...) {} controlled (ctrls, ...) { @@ -43,7 +39,6 @@ namespace Microsoft.Quantum.Intrinsic { } } - @EnableTestingViaName("Test.TargetDefinitions.MapPauli") internal operation MapPauli (qubit : Qubit, from : Pauli, to : Pauli) : Unit is Adj { if (from == to) { } @@ -73,13 +68,12 @@ namespace Microsoft.Quantum.Intrinsic { /// Given a multiply-controlled operation that requires k controls /// applies it using ceiling(k/2) controls and using floor(k/2) temporary qubits - @EnableTestingViaName("Test.TargetDefinitions.ApplyWithLessControlsA") internal operation ApplyWithLessControlsA<'T> (op : ((Qubit[],'T) => Unit is Adj), (controls : Qubit[], arg : 'T)) : Unit is Adj { let numControls = Length(controls); let numControlPairs = numControls / 2; - using (temps = Qubit[numControlPairs]) { + use temps = Qubit[numControlPairs] { within { - for (numPair in 0 .. 
numControlPairs - 1) { // constant depth + for numPair in 0 .. numControlPairs - 1 { // constant depth PhaseCCX(controls[2*numPair], controls[2*numPair + 1], temps[numPair]); } } @@ -90,7 +84,6 @@ namespace Microsoft.Quantum.Intrinsic { } } - @EnableTestingViaName("Test.TargetDefinitions.PhaseCCX") internal operation PhaseCCX (control1 : Qubit, control2 : Qubit, target : Qubit) : Unit is Adj { // https://arxiv.org/pdf/1210.0974.pdf#page=2 H(target); @@ -106,7 +99,6 @@ namespace Microsoft.Quantum.Intrinsic { H(target); } - @EnableTestingViaName("Test.TargetDefinitions.ReducedDyadicFraction") internal function ReducedDyadicFraction (numerator : Int, denominatorPowerOfTwo : Int) : (Int, Int) { if (numerator == 0) { return (0,0); } mutable num = numerator; @@ -118,7 +110,6 @@ namespace Microsoft.Quantum.Intrinsic { return (num,denPow); } - @EnableTestingViaName("Test.TargetDefinitions.ReducedDyadicFractionPeriodic") internal function ReducedDyadicFractionPeriodic (numerator : Int, denominatorPowerOfTwo : Int) : (Int, Int) { let (k,n) = ReducedDyadicFraction(numerator,denominatorPowerOfTwo); // k is odd, or (k,n) are both 0 let period = 2*2^n; // \pi k / 2^n is 2\pi periodic, therefore k is 2 * 2^n periodic @@ -129,30 +120,28 @@ namespace Microsoft.Quantum.Intrinsic { // TODO(swernli): Consider removing this in favor of pulling Microsoft.Quantum.Arrays.Subarray // into the runtime. - @EnableTestingViaName("Test.TargetDefinitions.Subarray") internal function Subarray<'T> (indices : Int[], array : 'T[]) : 'T[] { let nSliced = Length(indices); mutable sliced = new 'T[nSliced]; - for (idx in 0 .. nSliced - 1) { + for idx in 0 .. nSliced - 1 { set sliced w/= idx <- array[indices[idx]]; } return sliced; } - @EnableTestingViaName("Test.TargetDefinitions.IndicesOfNonIdentity") internal function IndicesOfNonIdentity (paulies : Pauli[]) : Int[] { mutable nonIdPauliCount = 0; - for (i in 0 .. Length(paulies) - 1) { + for i in 0 .. 
Length(paulies) - 1 { if (paulies[i] != PauliI) { set nonIdPauliCount += 1; } } mutable indices = new Int[nonIdPauliCount]; mutable index = 0; - for (i in 0 .. Length(paulies) - 1) { + for i in 0 .. Length(paulies) - 1 { if (paulies[i] != PauliI) { set indices w/= index <- i; set index = index + 1; @@ -162,7 +151,6 @@ namespace Microsoft.Quantum.Intrinsic { return indices; } - @EnableTestingViaName("Test.TargetDefinitions.RemovePauliI") internal function RemovePauliI (paulis : Pauli[], qubits : Qubit[]) : (Pauli[], Qubit[]) { let indices = IndicesOfNonIdentity(paulis); let newPaulis = Subarray(indices, paulis); diff --git a/src/Simulation/TargetDefinitions/Decompositions/XFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/XFromSinglyControlled.qs index e33e1c88263..1441748ff54 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/XFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/XFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $X$ gate. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.X") operation X (qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledX(qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/YFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/YFromSinglyControlled.qs index 7636e127f22..71499ff03ac 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/YFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/YFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $Y$ gate. 
@@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.Y") operation Y (qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledY(qubit); diff --git a/src/Simulation/TargetDefinitions/Decompositions/ZFromSinglyControlled.qs b/src/Simulation/TargetDefinitions/Decompositions/ZFromSinglyControlled.qs index 146bfc5da6d..7072a4f17c9 100644 --- a/src/Simulation/TargetDefinitions/Decompositions/ZFromSinglyControlled.qs +++ b/src/Simulation/TargetDefinitions/Decompositions/ZFromSinglyControlled.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $Z$ gate. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.Z") operation Z (qubit : Qubit) : Unit is Adj + Ctl { body (...) { ApplyUncontrolledZ(qubit); diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledX.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledX.cs index 180c37afce2..ff92c2b126a 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledX.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledX.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledZ.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledZ.cs index f1277c03508..82e30238f70 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledZ.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyControlledZ.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledH.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledH.cs index 6c3a3853527..9f731e3dec9 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledH.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledH.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRx.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRx.cs index df14696b85f..8812bda52bb 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRx.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRx.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRy.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRy.cs index c817abd4cd9..bc78be1cd1f 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRy.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRy.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRz.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRz.cs index 97bab6ea276..b4bc21b06ed 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRz.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledRz.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledS.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledS.cs index 2ffe2a2e9af..87c09f35a93 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledS.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledS.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledSWAP.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledSWAP.cs index e3e07a58db3..f39033606bd 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledSWAP.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledSWAP.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledT.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledT.cs index e06d80675a6..dd46b16a0e0 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledT.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledT.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledX.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledX.cs index 1f7163e9af1..829d2a860ba 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledX.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledX.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledY.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledY.cs index a280bbc4d5c..ad963c880c5 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledY.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledY.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledZ.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledZ.cs index ce04925ca51..a147edd306a 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledZ.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_ApplyUncontrolledZ.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Exp.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Exp.cs index f02267efb24..2161395ce4f 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Exp.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Exp.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_H.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_H.cs index a1cce04429e..94fa8055444 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_H.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_H.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingXX.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingXX.cs index 364d078bd31..57ad1f6e534 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingXX.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingXX.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingYY.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingYY.cs index 606161eafda..b6b44c917bc 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingYY.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingYY.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingZZ.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingZZ.cs index e53f81f828d..3d8121676e8 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingZZ.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_IsingZZ.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_M.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_M.cs index 6cce4c7c6a5..2b3dcef9785 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_M.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_M.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Measure.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Measure.cs index 09caaa5ac47..f463a2f4152 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Measure.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Measure.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_R.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_R.cs index d7853d09844..f9a1edb0286 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_R.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_R.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. 
All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Reset.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Reset.cs index 7d9111327ad..9d173078257 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Reset.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Reset.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Rx.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Rx.cs index cad8505a864..8dc38c529eb 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Rx.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Rx.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Ry.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Ry.cs index c8cee96909b..151f2ab9f1a 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Ry.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Ry.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Rz.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Rz.cs index 716e68c6dec..3b4a950a765 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Rz.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Rz.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_S.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_S.cs index f26c0923d2a..e6b15312f36 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_S.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_S.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_T.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_T.cs index 1e665fc087f..76030a713e3 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_T.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_T.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_X.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_X.cs index 17d5aee37e3..8b2337e9baa 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_X.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_X.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Y.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Y.cs index 87fbef8300e..f77737c3816 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Y.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Y.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Interfaces/IGate_Z.cs b/src/Simulation/TargetDefinitions/Interfaces/IGate_Z.cs index 1264e6cd20a..d312415eb64 100644 --- a/src/Simulation/TargetDefinitions/Interfaces/IGate_Z.cs +++ b/src/Simulation/TargetDefinitions/Interfaces/IGate_Z.cs @@ -1,7 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -using System; using Microsoft.Quantum.Simulation.Core; namespace Microsoft.Quantum.Intrinsic.Interfaces diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledX.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledX.qs index 0c5417ada18..1fcc9b663f9 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledX.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledX.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the controlled-X (or CNOT) gate to a pair of qubits. 
Does not support @@ -32,7 +31,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// CNOT(control, target); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyControlledX") internal operation ApplyControlledX (control : Qubit, target : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledZ.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledZ.qs index c91154afdab..de997a2bd74 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledZ.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyControlledZ.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the controlled-Z (CZ) gate to a pair of qubits. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// Controlled Z([control], target); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyControlledZ") internal operation ApplyControlledZ (control : Qubit, target : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledH.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledH.qs index 8b0f82af9c2..dc282b001e3 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledH.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledH.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Hadamard transformation to a single qubit. Note that the Controlled @@ -21,7 +20,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledH") internal operation ApplyUncontrolledH (qubit : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRx.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRx.qs index af0e8fcac43..67ec399dc17 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRx.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRx.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $x$-axis by a given angle. Note that the Controlled @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliX, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledRx") internal operation ApplyUncontrolledRx (theta : Double, qubit : Qubit) : Unit is Adj { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRy.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRy.qs index 4c63a72fffe..b01e95321d8 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRy.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRy.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $y$-axis by a given angle. 
Note that the Controlled @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliY, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledRy") internal operation ApplyUncontrolledRy (theta : Double, qubit : Qubit) : Unit is Adj { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRz.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRz.qs index ffeb44ab440..4ae8e69c84c 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRz.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledRz.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $z$-axis by a given angle. Note that the Controlled @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliZ, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledRz") internal operation ApplyUncontrolledRz (theta : Double, qubit : Qubit) : Unit is Adj { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledS.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledS.qs index f8fe46dd430..4314b1be9aa 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledS.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledS.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the π/4 phase gate to a single qubit. Note that the Controlled functor @@ -20,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledS") internal operation ApplyUncontrolledS (qubit : Qubit) : Unit is Adj { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledSWAP.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledSWAP.qs index 0bce09f38d6..5131366d57f 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledSWAP.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledSWAP.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the SWAP gate to a pair of qubits. Note that the Controlled functor @@ -34,7 +33,6 @@ namespace Microsoft.Quantum.Intrinsic { /// CNOT(qubit2, qubit1); /// CNOT(qubit1, qubit2); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledSWAP") operation ApplyUncontrolledSWAP (qubit1 : Qubit, qubit2 : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledT.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledT.qs index 2fd4b11d41d..2a51a407196 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledT.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledT.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the π/8 gate to a single qubit. Note that the Controlled functor is @@ -20,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledT") internal operation ApplyUncontrolledT (qubit : Qubit) : Unit is Adj { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledX.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledX.qs index 45a27d9a5f6..3ef07a1052b 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledX.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledX.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $X$ gate. Note that the Controlled functor is not supported. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledX") internal operation ApplyUncontrolledX (qubit : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledY.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledY.qs index 8e9a2fe20bb..4209b2d3b42 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledY.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledY.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $Y$ gate. Note that the Controlled functor is not supported. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledY") internal operation ApplyUncontrolledY (qubit : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledZ.qs b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledZ.qs index 302252cf68e..eedc7a4b7ff 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledZ.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/ApplyUncontrolledZ.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $Z$ gate. Note that the Controlled functor is not supported. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.ApplyUncontrolledZ") internal operation ApplyUncontrolledZ (qubit : Qubit) : Unit is Adj { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Exp.qs b/src/Simulation/TargetDefinitions/Intrinsic/Exp.qs index df58c4a3f3c..5283d126c34 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Exp.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Exp.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the exponential of a multi-qubit Pauli operator. @@ -23,7 +22,6 @@ namespace Microsoft.Quantum.Intrinsic { /// target register is to be rotated. /// ## qubits /// Register to apply the given rotation to. 
- @EnableTestingViaName("Test.TargetDefinitions.Exp") operation Exp (paulis : Pauli[], theta : Double, qubits : Qubit[]) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/H.qs b/src/Simulation/TargetDefinitions/Intrinsic/H.qs index 740796ddd0b..041926d8920 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/H.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/H.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Hadamard transformation to a single qubit. @@ -20,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.H") operation H (qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/IsingXX.qs b/src/Simulation/TargetDefinitions/Intrinsic/IsingXX.qs index c20f4c56731..100481f9fcf 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/IsingXX.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/IsingXX.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the two qubit Ising $XX$ rotation gate. @@ -25,7 +24,6 @@ namespace Microsoft.Quantum.Intrinsic { /// The first qubit input to the gate. /// ## qubit1 /// The second qubit input to the gate. 
- @EnableTestingViaName("Test.TargetDefinitions.IsingXX") internal operation IsingXX (theta : Double, qubit0 : Qubit, qubit1 : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/IsingYY.qs b/src/Simulation/TargetDefinitions/Intrinsic/IsingYY.qs index 39fe3a24ff5..ad2afb19265 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/IsingYY.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/IsingYY.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the two qubit Ising $YY$ rotation gate. @@ -25,7 +24,6 @@ namespace Microsoft.Quantum.Intrinsic { /// The first qubit input to the gate. /// ## qubit1 /// The second qubit input to the gate. - @EnableTestingViaName("Test.TargetDefinitions.IsingYY") internal operation IsingYY (theta : Double, qubit0 : Qubit, qubit1 : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/IsingZZ.qs b/src/Simulation/TargetDefinitions/Intrinsic/IsingZZ.qs index c800c2b92f2..0453ad04a2a 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/IsingZZ.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/IsingZZ.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the two qubit Ising $ZZ$ rotation gate. @@ -25,7 +24,6 @@ namespace Microsoft.Quantum.Intrinsic { /// The first qubit input to the gate. /// ## qubit1 /// The second qubit input to the gate. 
- @EnableTestingViaName("Test.TargetDefinitions.IsingZZ") internal operation IsingZZ (theta : Double, qubit0 : Qubit, qubit1 : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/M.qs b/src/Simulation/TargetDefinitions/Intrinsic/M.qs index 7c44f88a5d1..c02cbd2ef67 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/M.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/M.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Performs a measurement of a single qubit in the @@ -29,7 +28,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// Measure([PauliZ], [qubit]); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.M") operation M (qubit : Qubit) : Result { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Measure.qs b/src/Simulation/TargetDefinitions/Intrinsic/Measure.qs index ada792ca5f4..c43d54d59fc 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Measure.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Measure.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Performs a joint measurement of one or more qubits in the @@ -39,7 +38,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Remarks /// If the basis array and qubit array are different lengths, then the /// operation will fail. - @EnableTestingViaName("Test.TargetDefinitions.Measure") operation Measure (bases : Pauli[], qubits : Qubit[]) : Result { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/R.qs b/src/Simulation/TargetDefinitions/Intrinsic/R.qs index 8deee174ebb..7ea77eaba43 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/R.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/R.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the given Pauli axis. @@ -26,7 +25,6 @@ namespace Microsoft.Quantum.Intrinsic { /// When called with `pauli = PauliI`, this operation applies /// a *global phase*. This phase can be significant /// when used with the `Controlled` functor. - @EnableTestingViaName("Test.TargetDefinitions.R") operation R (pauli : Pauli, theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Reset.qs b/src/Simulation/TargetDefinitions/Intrinsic/Reset.qs index a8b16345b5e..86a78b96f00 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Reset.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Reset.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Given a single qubit, measures it and ensures it is in the |0⟩ state @@ -11,7 +10,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// The qubit whose state is to be reset to $\ket{0}$. - @EnableTestingViaName("Test.TargetDefinitions.Reset") operation Reset (qubit : Qubit) : Unit { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Rx.qs b/src/Simulation/TargetDefinitions/Intrinsic/Rx.qs index 20aa42633b7..62aa42f07b4 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Rx.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Rx.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $x$-axis by a given angle. 
@@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliX, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Rx") operation Rx (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Ry.qs b/src/Simulation/TargetDefinitions/Intrinsic/Ry.qs index e8df5d1fe88..3308cd281db 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Ry.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Ry.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $y$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliY, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Ry") operation Ry (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Rz.qs b/src/Simulation/TargetDefinitions/Intrinsic/Rz.qs index ca7c2d4d74c..1d645c3aa44 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Rz.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Rz.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies a rotation about the $z$-axis by a given angle. @@ -28,7 +27,6 @@ namespace Microsoft.Quantum.Intrinsic { /// ```qsharp /// R(PauliZ, theta, qubit); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.Rz") operation Rz (theta : Double, qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/S.qs b/src/Simulation/TargetDefinitions/Intrinsic/S.qs index 816c8771b31..f934bf73ef5 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/S.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/S.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the π/4 phase gate to a single qubit. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.S") operation S (qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/SWAP.qs b/src/Simulation/TargetDefinitions/Intrinsic/SWAP.qs index 9b9c80138ed..fc5ecff69f3 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/SWAP.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/SWAP.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the SWAP gate to a pair of qubits. @@ -33,7 +32,6 @@ namespace Microsoft.Quantum.Intrinsic { /// CNOT(qubit2, qubit1); /// CNOT(qubit1, qubit2); /// ``` - @EnableTestingViaName("Test.TargetDefinitions.SWAP") operation SWAP (qubit1 : Qubit, qubit2 : Qubit) : Unit is Adj + Ctl { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/T.qs b/src/Simulation/TargetDefinitions/Intrinsic/T.qs index dc3affe449a..0f3dac1d95e 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/T.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/T.qs @@ -19,7 +19,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.T") operation T (qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; } diff --git a/src/Simulation/TargetDefinitions/Intrinsic/X.qs b/src/Simulation/TargetDefinitions/Intrinsic/X.qs index 8b5f8feb62b..5a046afb5eb 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/X.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/X.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $X$ gate. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.X") operation X (qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Y.qs b/src/Simulation/TargetDefinitions/Intrinsic/Y.qs index 92b28478843..722391a6f45 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Y.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Y.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $Y$ gate. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. - @EnableTestingViaName("Test.TargetDefinitions.Y") operation Y (qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/Intrinsic/Z.qs b/src/Simulation/TargetDefinitions/Intrinsic/Z.qs index a05f34c60c8..2d2bd99df61 100644 --- a/src/Simulation/TargetDefinitions/Intrinsic/Z.qs +++ b/src/Simulation/TargetDefinitions/Intrinsic/Z.qs @@ -2,7 +2,6 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Intrinsic { - open Microsoft.Quantum.Diagnostics; /// # Summary /// Applies the Pauli $Z$ gate. @@ -19,7 +18,6 @@ namespace Microsoft.Quantum.Intrinsic { /// # Input /// ## qubit /// Qubit to which the gate should be applied. 
- @EnableTestingViaName("Test.TargetDefinitions.Z") operation Z (qubit : Qubit) : Unit is Adj + Ctl { body intrinsic; adjoint self; diff --git a/src/Simulation/TargetDefinitions/TargetPackages/Type3.Package.props b/src/Simulation/TargetDefinitions/TargetPackages/Type3.Package.props index ef68d7d949b..8babd91d65c 100644 --- a/src/Simulation/TargetDefinitions/TargetPackages/Type3.Package.props +++ b/src/Simulation/TargetDefinitions/TargetPackages/Type3.Package.props @@ -30,9 +30,6 @@ - - - diff --git a/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj b/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj index fb0d25c2e8e..37e8013917b 100644 --- a/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj +++ b/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj @@ -1,4 +1,4 @@ - + From 0c364696515dd187ead1f718d35f3cb231da13ba Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Fri, 12 Feb 2021 14:03:25 -0800 Subject: [PATCH 05/30] Generate ll file for QIR-static tests from Q# project (#517) We are still checking-in the generated file so the CI pipeline isn't affected. Moving to completely automatic QIR generation will be the next step. 
--- .gitignore | 1 + src/QirRuntime/README.md | 36 +- src/QirRuntime/build.py | 12 +- src/QirRuntime/generateqir.py | 53 + src/QirRuntime/lib/QIR/bridge-rt.ll | 2 +- src/QirRuntime/test.py | 13 +- src/QirRuntime/test/QIR-static/CMakeLists.txt | 6 +- .../test/QIR-static/compiler/Constants.qs | 37 - .../test/QIR-static/compiler/QirCore.qs | 30 - .../test/QIR-static/compiler/QirTarget.qs | 104 -- src/QirRuntime/test/QIR-static/generate.py | 45 - src/QirRuntime/test/QIR-static/qir-driver.cpp | 9 +- .../{qir-test-qsharp.ll => qir-gen.ll} | 1348 ++++++++++------- .../test/QIR-static/qir-test-functors.qs | 84 - src/QirRuntime/test/QIR-static/qsharp/Math.qs | 31 + .../test/QIR-static/qsharp/qir-gen.csproj | 9 + .../{ => qsharp}/qir-test-arrays.qs | 16 +- .../QIR-static/qsharp/qir-test-functors.qs | 66 + .../QIR-static/{ => qsharp}/qir-test-math.qs | 0 .../{ => qsharp}/qir-test-partials.qs | 9 +- .../{ => qsharp}/qir-test-qubits-results.qs | 16 +- .../{ => qsharp}/qir-test-strings.qs | 0 22 files changed, 1033 insertions(+), 894 deletions(-) create mode 100644 src/QirRuntime/generateqir.py delete mode 100644 src/QirRuntime/test/QIR-static/compiler/Constants.qs delete mode 100644 src/QirRuntime/test/QIR-static/compiler/QirCore.qs delete mode 100644 src/QirRuntime/test/QIR-static/compiler/QirTarget.qs delete mode 100644 src/QirRuntime/test/QIR-static/generate.py rename src/QirRuntime/test/QIR-static/{qir-test-qsharp.ll => qir-gen.ll} (53%) delete mode 100644 src/QirRuntime/test/QIR-static/qir-test-functors.qs create mode 100644 src/QirRuntime/test/QIR-static/qsharp/Math.qs create mode 100644 src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj rename src/QirRuntime/test/QIR-static/{ => qsharp}/qir-test-arrays.qs (86%) create mode 100644 src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs rename src/QirRuntime/test/QIR-static/{ => qsharp}/qir-test-math.qs (100%) rename src/QirRuntime/test/QIR-static/{ => qsharp}/qir-test-partials.qs (55%) rename 
src/QirRuntime/test/QIR-static/{ => qsharp}/qir-test-qubits-results.qs (60%) rename src/QirRuntime/test/QIR-static/{ => qsharp}/qir-test-strings.qs (100%) diff --git a/.gitignore b/.gitignore index 8ed0e07dbcc..7ada7136a29 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ bld/ [Oo]bj/ [Ll]og/ [Dd]rops/ +**/qir/qir-gen.ll # Visual Studio 2015/2017 cache/options directory .vs/ diff --git a/src/QirRuntime/README.md b/src/QirRuntime/README.md index 856bdbf0ce0..fbdd3a525c9 100644 --- a/src/QirRuntime/README.md +++ b/src/QirRuntime/README.md @@ -24,8 +24,8 @@ You can use CMake directly. For example, to produce a release build: Or you can run `build.py` script from QirRuntime folder. The default options for the script are `make debug`. -- (Windows) `python build.py [make/nomake] [debug|release]` -- (Linux) `python3 build.py [make/nomake] [debug|release]` +- (Windows) `python build.py [make/nomake] [debug|release] [noqirgen]` +- (Linux) `python3 build.py [make/nomake] [debug|release] [noqirgen]` The script will place the build artifacts into `build/[Windows|Linux]/[Debug|Release]` folder. We strongly recommend doing local builds using the build script because it also runs clang-tidy. @@ -36,26 +36,29 @@ CI builds and tests are enabled for this project. The build has no external depe ### Windows pre-reqs 1. Install Clang, Ninja and CMake from the public distros. -2. Add all three to your/system `%PATH%`. -3. Install VS 2019 and enable "Desktop development with C++" component (Clang uses MSVC's standard library on Windows). -4. Install clang-tidy and clang-format if your Clang/LLVM packages didn't include the tools. -5. <_optional_> To use build/test scripts install Python 3.8. +1. Add all three to your/system `%PATH%`. +1. Install VS 2019 and enable "Desktop development with C++" component (Clang uses MSVC's standard library on Windows). +1. Install clang-tidy and clang-format if your Clang/LLVM packages didn't include the tools. +1. 
Install the same version of dotnet as specified by qsharp-runtime [README](../../README.md) +1. <_optional_> To use build/test scripts install Python 3.8. *Building from Visual Studio and VS Code is **not** supported. Running cmake from the editors will likely default to MSVC or clang-cl and fail.* ### Linux via WSL pre-reqs -1. On the host Windows machine [enable WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10) and install Ubuntu 20.04 LTS. -2. In the Ubuntu's terminal: +1. On the host Windows machine [enable WSL](https://docs.microsoft.com/en-us/windows/wsl/install-win10) and install + Ubuntu 20.04 LTS. +1. In the Ubuntu's terminal: 1. `$ sudo apt install cmake` (`$ cmake --version` should return 3.16.3) - 2. `$ sudo apt-get install ninja-build` (`$ ninja --version` should return 1.10.0) - 3. `$ sudo apt install clang` (`$ clang++ --version` should return 10.0.0) - 4. Set Clang as the preferred C/C++ compiler: + 1. `$ sudo apt-get install ninja-build` (`$ ninja --version` should return 1.10.0) + 1. `$ sudo apt install clang` (`$ clang++ --version` should return 10.0.0) + 1. Set Clang as the preferred C/C++ compiler: - $ export CC=/usr/bin/clang - $ export CXX=/usr/bin/clang++ - 5. `$ sudo apt install clang-tidy` (`$ clang-tidy --version` should return 'LLVM version 10.0.0') - 6. <_optional_> To use build/test scripts, check that you have python3 installed (it should be by default). + 1. `$ sudo apt install clang-tidy` (`$ clang-tidy --version` should return 'LLVM version 10.0.0') + 1. Install the same version of dotnet as specified by qsharp-runtime [README](../../README.md) + 1. <_optional_> To use build/test scripts, check that you have python3 installed (it should be by default). See [https://code.visualstudio.com/docs/remote/wsl] on how to use VS Code with WSL. @@ -65,6 +68,11 @@ Some of the tests depend on Microsoft.Quantum.Simulator.Runtime library. 
To run from this repository or provide your own version of the library in a folder the OS would search during dynamic library lookup. +Some of the tests use generated QIR (*.ll) files as build input. Currently the files are checked-in as part of the project + but in the future they will be replaced by automatic generation during build. To regenerate the files, run generateqir.py + or build/test scripts without specifying `noqirgen`. To use the checked-in files without regenerating them, run build/test + scripts with `noqirgen` argument. + ### Running tests with test.py To execute all tests locally run `test.py` from the project's root folder: @@ -162,8 +170,6 @@ CMake doesn't support using LLVM's IR files as input so instead we invoke Clang 1. All functionality related to BigInt type (including `__quantum__rt__bigint_to_string`) NYI. 1. QIR is assumed to be __single threaded__. No effort was made to make the bridge and runtime thread safe. 1. Strings are implemented as a thin wrapper over std::string with virtually no optimizations. -1. `__quantum__rt__string_create` currently doesn't conform to the spec (it expects a null terminated string rather than - a string of specified length). 1. Variadic functions (e.g. `__quantum__rt__array_create`) require platform specific bridges. The currently implemented bridge is for Windows. 1. Qubit borrowing NYI (needs both bridge and simulator's support). diff --git a/src/QirRuntime/build.py b/src/QirRuntime/build.py index f4c52a32483..ac20d6ec86c 100644 --- a/src/QirRuntime/build.py +++ b/src/QirRuntime/build.py @@ -1,7 +1,8 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -import sys, os, platform, subprocess, datetime +import sys, os, platform, subprocess, datetime, shutil +import generateqir # ============================================================================= # The script will create [root]\build\[OS]\[Debug|Release] folder for the output. 
@@ -67,6 +68,7 @@ def do_build(root_dir, should_make, should_build, flavor): flavor = "Debug" should_make = True should_build = True + noqirgen = False for arg in sys.argv: arg = arg.lower() @@ -80,9 +82,17 @@ def do_build(root_dir, should_make, should_build, flavor): should_make = False elif arg == "make": should_build = False + elif arg == "noqirgen": + noqirgen = True else: log("unrecognized argument: " + arg) sys.exit() root_dir = os.path.dirname(os.path.abspath(__file__)) + + if not noqirgen: + if generateqir.do_generate_all(root_dir) != 0: + log("Aborting build due to failures in QIR generation") + sys.exit() + do_build(root_dir, should_make, should_build, flavor) \ No newline at end of file diff --git a/src/QirRuntime/generateqir.py b/src/QirRuntime/generateqir.py new file mode 100644 index 00000000000..36f99bcbf77 --- /dev/null +++ b/src/QirRuntime/generateqir.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os, sys, subprocess, datetime, shutil, pathlib + +# ============================================================================= +def log(message): + now = datetime.datetime.now() + current_time = now.strftime("%H:%M:%S") + print(current_time + ": " + message) +# ============================================================================= + +# ============================================================================= +# The Q# project should be located in a subfolder under test_dir, named "qsharp" +def do_generate_qir(test_dir): + qsharp_project_path = os.path.join(test_dir, "qsharp") + qirgencmd = "dotnet build " + qsharp_project_path + log("running: " + qirgencmd) + result = subprocess.run(qirgencmd, shell = True) + if result.returncode != 0: + return result + + # really, expect to have only one file + for generated_qir_file in os.listdir(os.path.join(qsharp_project_path, "qir")): + shutil.copyfile( + os.path.join(os.path.join(qsharp_project_path, "qir", 
generated_qir_file)), + os.path.join(test_dir, generated_qir_file)) + + return result +# ============================================================================= + +# ============================================================================= +def do_generate_all(root_dir): + test_projects = [ + os.path.join(root_dir, "test", "QIR-static"), + # add other test folders here + ] + + for test_dir in test_projects: + log("generating QIR for: " + test_dir) + result = do_generate_qir(test_dir) + if result.returncode != 0: + log("Failed to generate QIR for: " + test_dir) + return result.returncode + + return 0 +# ============================================================================= + +if __name__ == '__main__': + root_dir = os.path.dirname(os.path.abspath(__file__)) + do_generate_all(root_dir) + + diff --git a/src/QirRuntime/lib/QIR/bridge-rt.ll b/src/QirRuntime/lib/QIR/bridge-rt.ll index 73a33d737a6..dba25ad0067 100644 --- a/src/QirRuntime/lib/QIR/bridge-rt.ll +++ b/src/QirRuntime/lib/QIR/bridge-rt.ll @@ -381,7 +381,7 @@ define void @__quantum__rt__callable_memory_management(i32 %index, %Callable* %. ; NYI: ;define %String* @__quantum__rt__bigint_to_string(%BigInt*) -define %String* @__quantum__rt__string_create(i32 %ignoredStrLength, i8* %null_terminated_buffer) { +define %String* @__quantum__rt__string_create(i32 %length_ignored, i8* %null_terminated_buffer) { %str = call %"struct.QirString"* @quantum__rt__string_create(i8* %null_terminated_buffer) %.str = bitcast %"struct.QirString"* %str to %String* ret %String* %.str diff --git a/src/QirRuntime/test.py b/src/QirRuntime/test.py index e69749d2b2f..7c3d3450806 100644 --- a/src/QirRuntime/test.py +++ b/src/QirRuntime/test.py @@ -2,11 +2,12 @@ # Licensed under the MIT License. 
import sys, os, platform, subprocess, datetime, shutil -import build +import build, generateqir # ============================================================================= # Accepts arguments: # nobuild [if omitted, will attempt to build the project] +# noqirgen [if omitted, will attempt to generate qir from Q# projects] # debug/release # # For example: "test.py nobuild debug" @@ -24,6 +25,7 @@ def log(message): # parameters flavor = "Debug" nobuild = False +noqirgen = False for arg in sys.argv: arg = arg.lower() if arg == "test.py": @@ -34,10 +36,19 @@ def log(message): flavor = "Release" elif arg == "nobuild": nobuild = True + noqirgen = True + elif arg == "noqirgen": + noqirgen = True else: log("unrecognized argument: " + arg) sys.exit() +if not noqirgen: + if generateqir.do_generate_all(root_dir) != 0: + log("build failed to generate QIR => won't execute the tests") + log("to execute the tests from the last successful build run `test.py nobuild`") + sys.exit() + if not nobuild: result = build.do_build(root_dir, True, True, flavor) # should_make, should_build if result.returncode != 0: diff --git a/src/QirRuntime/test/QIR-static/CMakeLists.txt b/src/QirRuntime/test/QIR-static/CMakeLists.txt index ae056c3453b..838e1f36f47 100644 --- a/src/QirRuntime/test/QIR-static/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-static/CMakeLists.txt @@ -1,12 +1,8 @@ # compile test ll files into a library set(TEST_FILES -# qir-test-arrays -# qir-test-functors qir-test-noqsharp - qir-test-qsharp -# qir-test-partials -# qir-test-qubits-results + qir-gen ) foreach(file ${TEST_FILES}) diff --git a/src/QirRuntime/test/QIR-static/compiler/Constants.qs b/src/QirRuntime/test/QIR-static/compiler/Constants.qs deleted file mode 100644 index 1192459dbb2..00000000000 --- a/src/QirRuntime/test/QIR-static/compiler/Constants.qs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -// these are all the static methods and const fields form System.Math class of .NET CLR -// that are not exposed as language operators and are relevant within type System. -// If there are two versions of the function for Int and Double types, the corresponding -// functions have suffix I or D. ExpD also has a suffix to avoid name clash with Primitives.Exp. - -namespace Microsoft.Quantum.Math { - - /// # Summary - /// Returns the natural logarithmic base to double-precision. - /// - /// # Output - /// A double-precision approximation of the natural logarithic base, - /// $e \approx 2.7182818284590452354$. - /// - /// # See Also - /// - Microsoft.Quantum.Math.PI - function E() : Double { - return 2.7182818284590452354; - } - - /// # Summary - /// Represents the ratio of the circumference of a circle to its diameter. - /// - /// # Ouptut - /// A double-precision approximation of the the circumference of a circle - /// to its diameter, $\pi \approx 3.14159265358979323846$. - /// - /// # See Also - /// - Microsoft.Quantum.Math.E - function PI() : Double { - return 3.14159265358979323846; - } - -} diff --git a/src/QirRuntime/test/QIR-static/compiler/QirCore.qs b/src/QirRuntime/test/QIR-static/compiler/QirCore.qs deleted file mode 100644 index e63359e95a9..00000000000 --- a/src/QirRuntime/test/QIR-static/compiler/QirCore.qs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -namespace Microsoft.Quantum.Core{ - - @Attribute() - newtype Attribute = Unit; - - @Attribute() - newtype Inline = Unit; - - @Attribute() - newtype EntryPoint = Unit; - - function Length<'T> (array : 'T[]) : Int { body intrinsic; } - - function RangeStart (range : Range) : Int { body intrinsic; } - - function RangeStep (range : Range) : Int { body intrinsic; } - - function RangeEnd (range : Range) : Int { body intrinsic; } - - function RangeReverse (range : Range) : Range { body intrinsic; } -} - -namespace Microsoft.Quantum.Targeting { - - @Attribute() - newtype TargetInstruction = String; -} diff --git a/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs b/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs deleted file mode 100644 index c117bdd0667..00000000000 --- a/src/QirRuntime/test/QIR-static/compiler/QirTarget.qs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Intrinsic { - - open Microsoft.Quantum.Targeting; - - @Inline() - function NAN() : Double { - body intrinsic; - } - - @Inline() - function IsNan(d: Double) : Bool { - body intrinsic; - } - - @Inline() - function INFINITY() : Double { - body intrinsic; - } - - @Inline() - function IsInf(d: Double) : Bool { - body intrinsic; - } - - @Inline() - function IsNegativeInfinity(d : Double) : Bool { - body intrinsic; - } - - @Inline() - function Sqrt(d : Double) : Double { - body intrinsic; - } - - @Inline() - function Log(d : Double) : Double { - body intrinsic; - } - - @Inline() - function ArcTan2(y : Double, x : Double) : Double { - body intrinsic; - } - - - operation X(qb : Qubit) : Unit - is Adj + Ctl { - body intrinsic; - adjoint self; - } - - operation Y(qb : Qubit) : Unit - is Adj + Ctl { - body intrinsic; - adjoint self; - } - - operation Z(qb : Qubit) : Unit - is Adj + Ctl { - body intrinsic; - adjoint self; - } - - operation H(qb : Qubit) : Unit - is Adj + Ctl { - body intrinsic; - 
adjoint self; - } - - operation S(qb : Qubit) : Unit - is Adj + Ctl { - body intrinsic; - } - - operation T(qb : Qubit) : Unit - is Adj + Ctl { - body intrinsic; - } - - operation R (pauli : Pauli, theta : Double, qubit : Qubit) : Unit - is Adj + Ctl - { - body intrinsic; - } - - operation Exp (paulis : Pauli[], theta : Double, qubits : Qubit[]) : Unit - is Adj + Ctl - { - body intrinsic; - } - - operation Measure(bases : Pauli[], qubits : Qubit[]) : Result { - body intrinsic; - } - - operation M(qb : Qubit) : Result { - body (...) { - return Measure([PauliZ], [qb]); - } - } -} diff --git a/src/QirRuntime/test/QIR-static/generate.py b/src/QirRuntime/test/QIR-static/generate.py deleted file mode 100644 index 99242f689b4..00000000000 --- a/src/QirRuntime/test/QIR-static/generate.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. - -import sys, os, platform, subprocess, datetime, shutil - -# ============================================================================= -# Generates QIR files for all *.qs files in this folder -# Accepts arguments: -# path to qsc.exe (absolute or rely on Path env) -# -# For example: "generate.py qsc.exe" -# ============================================================================= - -# ============================================================================= -def log(message): - now = datetime.datetime.now() - current_time = now.strftime("%H:%M:%S") - print(current_time + ": " + message) -# ============================================================================= - -root_dir = os.path.dirname(os.path.abspath(__file__)) - -# parameters -qsc = sys.argv[1] # argv[0] is the name of this script file - -# if no file name argument, process all qs files in this folder, otherwise treat all additional args as input files -files_to_process = "" -output_file = "qir-test-qsharp" -if len(sys.argv) == 2: - for file in os.listdir(root_dir): - (file_name, ext) = 
os.path.splitext(file) - if ext == ".qs": - files_to_process = files_to_process + " " + file -else: - for i in range(2, len(sys.argv)): - (file_name, ext) = os.path.splitext(sys.argv[i]) - files_to_process = files_to_process + " " + file_name + ".qs" - if i == 2 and len(sys.argv) == 3: - output_file = file_name - -command = (qsc + " build --qir s --build-exe --input " + files_to_process + - " compiler\\qircore.qs compiler\\qirtarget.qs compiler\\Constants.qs --proj " + output_file) -log("Executing: " + command) -subprocess.run(command, shell = True) - diff --git a/src/QirRuntime/test/QIR-static/qir-driver.cpp b/src/QirRuntime/test/QIR-static/qir-driver.cpp index c6059c93a8e..79f9cfdcca4 100644 --- a/src/QirRuntime/test/QIR-static/qir-driver.cpp +++ b/src/QirRuntime/test/QIR-static/qir-driver.cpp @@ -70,7 +70,7 @@ TEST_CASE("QIR: Using 1D arrays", "[qir][qir.arr1d]") REQUIRE(res == (0 + 42) + (42 + 3 + 4)); } -extern "C" bool Microsoft__Quantum__Testing__QIR__Test_Qubit_Result_Management__body(); // NOLINT +extern "C" void Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body(); // NOLINT struct QubitsResultsTestSimulator : public Microsoft::Quantum::SimulatorStub { // no intelligent reuse, we just want to check that QIR releases all qubits @@ -157,8 +157,7 @@ TEST_CASE("QIR: allocating and releasing qubits and results", "[qir][qir.qubit][ unique_ptr sim = make_unique(); QirContextScope qirctx(sim.get(), true /*trackAllocatedObjects*/); - int64_t res = Microsoft__Quantum__Testing__QIR__Test_Qubit_Result_Management__body(); - REQUIRE(res); + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body()); // check that all qubits have been released for (size_t id = 0; id < sim->qubits.size(); id++) @@ -300,7 +299,7 @@ struct FunctorsTestSimulator : public Microsoft::Quantum::SimulatorStub } }; FunctorsTestSimulator* g_ctrqapi = nullptr; -extern "C" int64_t Microsoft__Quantum__Testing__QIR__TestControlled__body(); // NOLINT +extern "C" 
void Microsoft__Quantum__Testing__QIR__TestControlled__body(); // NOLINT extern "C" void __quantum__qis__k__body(Qubit q) // NOLINT { g_ctrqapi->X(q); @@ -315,7 +314,7 @@ TEST_CASE("QIR: application of nested controlled functor", "[qir][qir.functor]") QirContextScope qirctx(qapi.get(), true /*trackAllocatedObjects*/); g_ctrqapi = qapi.get(); - REQUIRE(0 == Microsoft__Quantum__Testing__QIR__TestControlled__body()); + CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestControlled__body()); g_ctrqapi = nullptr; } diff --git a/src/QirRuntime/test/QIR-static/qir-test-qsharp.ll b/src/QirRuntime/test/QIR-static/qir-gen.ll similarity index 53% rename from src/QirRuntime/test/QIR-static/qir-test-qsharp.ll rename to src/QirRuntime/test/QIR-static/qir-gen.ll index a619a2a6218..4a41d3df9e7 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-qsharp.ll +++ b/src/QirRuntime/test/QIR-static/qir-gen.ll @@ -4,8 +4,8 @@ %Tuple = type opaque %Callable = type opaque %Qubit = type opaque -%Array = type opaque %String = type opaque +%Array = type opaque @ResultZero = external global %Result* @ResultOne = external global %Result* @@ -17,21 +17,29 @@ @Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] @PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] 
@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] +@0 = internal constant [14 x i8] c"error code: 1\00" +@1 = internal constant [14 x i8] c"error code: 2\00" +@2 = internal constant [14 x i8] c"error code: 3\00" +@3 = internal constant [14 x i8] c"error code: 2\00" +@4 = internal constant [14 x i8] c"error code: 5\00" +@5 = internal constant [14 x i8] c"error code: 6\00" +@6 = internal constant [14 x i8] c"error code: 7\00" +@7 = internal constant [30 x i8] c"Unexpected measurement result\00" @Microsoft__Quantum__Testing__QIR__Subtract = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] @PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] @MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] -@0 = internal constant [20 x i8] c"Pauli value: PauliI\00" -@1 = internal constant [14 x i8] c"Pauli value: \00" -@2 = internal constant [7 x i8] c"PauliX\00" -@3 = internal constant [7 x i8] c"PauliY\00" -@4 = internal constant [7 x i8] c"PauliZ\00" +@8 = internal constant [20 x i8] c"Pauli value: PauliI\00" +@9 = internal constant [14 x i8] c"Pauli value: \00" +@10 = internal constant [7 x i8] c"PauliX\00" +@11 = internal constant [7 x i8] c"PauliY\00" +@12 = internal constant [7 x i8] c"PauliZ\00" -define i64 @Microsoft__Quantum__Testing__QIR__TestControlled__body() { +define void 
@Microsoft__Quantum__Testing__QIR__TestControlled__body() { entry: %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %1, i64 0, i32 0 - %3 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %1, i64 0, i32 1 + %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Qop, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) store %Callable* %4, %Callable** %2 store i64 1, i64* %3 @@ -59,257 +67,495 @@ entry: call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_ctl_qop) call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 1) call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 1) - %error_code = alloca i64 - store i64 0, i64* %error_code %q1 = call %Qubit* @__quantum__rt__qubit_allocate() %q2 = call %Qubit* @__quantum__rt__qubit_allocate() %q3 = call %Qubit* @__quantum__rt__qubit_allocate() %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) %6 = bitcast %Tuple* %5 to { %Qubit* }* - %7 = getelementptr { %Qubit* }, { %Qubit* }* %6, i64 0, i32 0 + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 store %Qubit* %q1, %Qubit** %7 call void @__quantum__rt__callable_invoke(%Callable* %qop, %Tuple* %5, %Tuple* null) %8 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1) %9 = load %Result*, %Result** @ResultOne %10 = call i1 @__quantum__rt__result_equal(%Result* %8, %Result* %9) %11 = xor i1 %10, true - br i1 %11, label %then0__1, label 
%else__1 + br i1 %11, label %then0__1, label %continue__1 then0__1: ; preds = %entry - store i64 1, i64* %error_code - br label %continue__1 + %12 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* 
%ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %12) + unreachable + +continue__1: ; preds = %entry + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Qubit* }* + %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 + store %Qubit* %q2, %Qubit** %15 + call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %13, %Tuple* null) + %16 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) + %17 = load %Result*, %Result** @ResultOne + %18 = call i1 @__quantum__rt__result_equal(%Result* %16, %Result* %17) + %19 = xor i1 %18, true + br i1 %19, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %20 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void 
@__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__2: ; preds = %continue__1 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %22 = bitcast %Tuple* %21 to { %Array*, %Qubit* }* + %23 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { 
%Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 1 + %25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to %Qubit** + store %Qubit* %q1, %Qubit** %27 + store %Array* %25, %Array** %23 + store %Qubit* %q3, %Qubit** %24 + call void @__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %21, %Tuple* null) + %28 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %29 = load %Result*, %Result** @ResultOne + %30 = call i1 @__quantum__rt__result_equal(%Result* %28, %Result* %29) + %31 = xor i1 %30, true + br i1 %31, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + %32 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + 
call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %32) + unreachable + +continue__3: ; preds = %continue__2 + %33 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %34 = bitcast %Tuple* %33 to { %Array*, %Qubit* }* + %35 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 0 + %36 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 1 + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) 
+ %39 = bitcast i8* %38 to %Qubit** + store %Qubit* %q2, %Qubit** %39 + store %Array* %37, %Array** %35 + store %Qubit* %q3, %Qubit** %36 + call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, %Tuple* %33, %Tuple* null) + %40 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %41 = load %Result*, %Result** @ResultZero + %42 = call i1 @__quantum__rt__result_equal(%Result* %40, %Result* %41) + %43 = xor i1 %42, true + br i1 %43, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + %44 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @3, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %44) + unreachable + +continue__4: ; preds = %continue__3 + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %46 = bitcast %Tuple* %45 to { %Array*, { %Array*, %Qubit* }* }* + %47 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 1 + %49 
= call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) + %51 = bitcast i8* %50 to %Qubit** + store %Qubit* %q1, %Qubit** %51 + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %53 = bitcast %Tuple* %52 to { %Array*, %Qubit* }* + %54 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to %Qubit** + store %Qubit* %q2, %Qubit** %58 + store %Array* %56, %Array** %54 + store %Qubit* %q3, %Qubit** %55 + store %Array* %49, %Array** %47 + store { %Array*, %Qubit* }* %53, { %Array*, %Qubit* }** %48 + call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %45, %Tuple* null) + %59 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %60 = load %Result*, %Result** @ResultOne + %61 = call i1 @__quantum__rt__result_equal(%Result* %59, %Result* %60) + %62 = xor i1 %61, true + br i1 %62, label %then0__5, label %continue__5 -else__1: ; preds = %entry - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %13 = bitcast %Tuple* %12 to { %Qubit* }* - %14 = getelementptr { %Qubit* }, { %Qubit* }* %13, i64 0, i32 0 - store %Qubit* %q2, %Qubit** %14 - call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %12, %Tuple* null) - %15 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) - %16 = load %Result*, %Result** @ResultOne - %17 = call i1 @__quantum__rt__result_equal(%Result* %15, %Result* %16) - %18 = xor i1 %17, true - br i1 %18, label %then0__2, label %else__2 - -then0__2: ; preds = %else__1 
- store i64 2, i64* %error_code - br label %continue__2 - -else__2: ; preds = %else__1 - %19 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %20 = bitcast %Tuple* %19 to { %Array*, %Qubit* }* - %21 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %20, i64 0, i32 0 - %22 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %20, i64 0, i32 1 - %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 0) - %25 = bitcast i8* %24 to %Qubit** - store %Qubit* %q1, %Qubit** %25 - store %Array* %23, %Array** %21 - store %Qubit* %q3, %Qubit** %22 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %19, %Tuple* null) - %26 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %27 = load %Result*, %Result** @ResultOne - %28 = call i1 @__quantum__rt__result_equal(%Result* %26, %Result* %27) - %29 = xor i1 %28, true - br i1 %29, label %then0__3, label %else__3 - -then0__3: ; preds = %else__2 - store i64 3, i64* %error_code - br label %continue__3 - -else__3: ; preds = %else__2 - %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %31 = bitcast %Tuple* %30 to { %Array*, %Qubit* }* - %32 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %31, i64 0, i32 0 - %33 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %31, i64 0, i32 1 - %34 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) - %36 = bitcast i8* %35 to %Qubit** - store %Qubit* %q2, %Qubit** %36 - store %Array* %34, %Array** %32 - store %Qubit* %q3, %Qubit** %33 - call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, %Tuple* %30, %Tuple* null) - %37 = call %Result* 
@Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %38 = load %Result*, %Result** @ResultZero - %39 = call i1 @__quantum__rt__result_equal(%Result* %37, %Result* %38) - %40 = xor i1 %39, true - br i1 %40, label %then0__4, label %else__4 - -then0__4: ; preds = %else__3 - store i64 4, i64* %error_code - br label %continue__4 - -else__4: ; preds = %else__3 - %41 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %42 = bitcast %Tuple* %41 to { %Array*, { %Array*, %Qubit* }* }* - %43 = getelementptr { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %42, i64 0, i32 0 - %44 = getelementptr { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %42, i64 0, i32 1 - %45 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 0) - %47 = bitcast i8* %46 to %Qubit** - store %Qubit* %q1, %Qubit** %47 - %48 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %49 = bitcast %Tuple* %48 to { %Array*, %Qubit* }* - %50 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i64 0, i32 0 - %51 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i64 0, i32 1 - %52 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0) - %54 = bitcast i8* %53 to %Qubit** - store %Qubit* %q2, %Qubit** %54 - store %Array* %52, %Array** %50 - store %Qubit* %q3, %Qubit** %51 - store %Array* %45, %Array** %43 - store { %Array*, %Qubit* }* %49, { %Array*, %Qubit* }** %44 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %41, %Tuple* null) - %55 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %56 = load %Result*, %Result** @ResultOne - %57 = call i1 
@__quantum__rt__result_equal(%Result* %55, %Result* %56) - %58 = xor i1 %57, true - br i1 %58, label %then0__5, label %else__5 - -then0__5: ; preds = %else__4 - store i64 5, i64* %error_code - br label %continue__5 - -else__5: ; preds = %else__4 - %59 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %59, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %59) - %60 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %61 = bitcast %Tuple* %60 to { %Array*, %Qubit* }* - %62 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %61, i64 0, i32 0 - %63 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %61, i64 0, i32 1 - %64 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 0) - %66 = bitcast i8* %65 to %Qubit** - %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 1) - %68 = bitcast i8* %67 to %Qubit** - store %Qubit* %q1, %Qubit** %66 - store %Qubit* %q2, %Qubit** %68 - store %Array* %64, %Array** %62 - store %Qubit* %q3, %Qubit** %63 - call void @__quantum__rt__callable_invoke(%Callable* %59, %Tuple* %60, %Tuple* null) - %69 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %70 = load %Result*, %Result** @ResultZero - %71 = call i1 @__quantum__rt__result_equal(%Result* %69, %Result* %70) - %72 = xor i1 %71, true - br i1 %72, label %then0__6, label %else__6 - -then0__6: ; preds = %else__5 - store i64 6, i64* %error_code - br label %continue__6 - -else__6: ; preds = %else__5 +then0__5: ; preds = %continue__4 + %63 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void 
@__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, 
i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %63) + unreachable + +continue__5: ; preds = %continue__4 + %64 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %64) + %65 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %66 = bitcast %Tuple* %65 to { %Array*, %Qubit* }* + %67 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 0 + %68 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to %Qubit** + %72 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %q1, %Qubit** %71 + store %Qubit* %q2, %Qubit** %73 + store %Array* %69, %Array** %67 + store %Qubit* %q3, %Qubit** %68 + call void @__quantum__rt__callable_invoke(%Callable* %64, %Tuple* %65, %Tuple* null) + %74 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %75 = load %Result*, %Result** @ResultZero + %76 = call i1 @__quantum__rt__result_equal(%Result* %74, %Result* %75) + %77 = xor i1 %76, true + br i1 %77, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + %78 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @5, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %78) + unreachable + +continue__6: ; preds = %continue__5 %q4 = call %Qubit* @__quantum__rt__qubit_allocate() - %73 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %73, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %73) - %74 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %75 = bitcast %Tuple* %74 to { %Qubit* }* - %76 = getelementptr { %Qubit* }, { %Qubit* }* %75, i64 0, i32 0 - store %Qubit* %q3, %Qubit** %76 - call void @__quantum__rt__callable_invoke(%Callable* %73, %Tuple* %74, %Tuple* null) - %77 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %77, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %77) - call void @__quantum__rt__callable_make_adjoint(%Callable* %77) - %78 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %79 = bitcast %Tuple* %78 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* - %80 = getelementptr { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %79, i64 0, i32 0 - %81 = getelementptr { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %79, i64 0, i32 1 - %82 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %83 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 0) - %84 = bitcast i8* %83 to %Qubit** - store %Qubit* %q1, %Qubit** %84 - %85 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %86 = bitcast %Tuple* %85 to { %Array*, { %Array*, %Qubit* }* }* - %87 = getelementptr { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %86, i64 0, i32 0 - %88 = getelementptr { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %86, i64 0, i32 1 - %89 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %89, i64 0) - %91 = bitcast i8* %90 to %Qubit** - store %Qubit* %q2, %Qubit** %91 - %92 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %93 = bitcast %Tuple* %92 to { %Array*, %Qubit* }* - %94 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %93, i64 0, i32 0 - %95 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %93, i64 0, i32 1 - %96 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) - %98 = bitcast i8* %97 to %Qubit** - store %Qubit* %q3, %Qubit** %98 - store %Array* %96, %Array** %94 - store %Qubit* %q4, %Qubit** %95 - store %Array* %89, %Array** %87 - store { %Array*, %Qubit* }* %93, { %Array*, %Qubit* }** %88 - store %Array* %82, %Array** %80 - store { %Array*, { %Array*, %Qubit* }* }* %86, { %Array*, { %Array*, %Qubit* }* }** %81 - call void @__quantum__rt__callable_invoke(%Callable* %77, %Tuple* %78, %Tuple* null) - %99 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4) - %100 = load %Result*, %Result** @ResultOne - %101 = call i1 @__quantum__rt__result_equal(%Result* %99, %Result* %100) - %102 = xor i1 %101, true - br i1 %102, label %then0__7, label %continue__7 - 
-then0__7: ; preds = %else__6 - store i64 7, i64* %error_code - br label %continue__7 - -continue__7: ; preds = %then0__7, %else__6 + %79 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %79) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { %Qubit* }* + %82 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %81, i32 0, i32 0 + store %Qubit* %q3, %Qubit** %82 + call void @__quantum__rt__callable_invoke(%Callable* %79, %Tuple* %80, %Tuple* null) + %83 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %83) + call void @__quantum__rt__callable_make_adjoint(%Callable* %83) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %85 = bitcast %Tuple* %84 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* + %86 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 1 + %88 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) + %90 = bitcast i8* %89 to %Qubit** + store %Qubit* %q1, %Qubit** %90 + %91 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %92 = bitcast %Tuple* %91 to { %Array*, { %Array*, %Qubit* }* }* + %93 = getelementptr inbounds { 
%Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 0 + %94 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 1 + %95 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0) + %97 = bitcast i8* %96 to %Qubit** + store %Qubit* %q2, %Qubit** %97 + %98 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %99 = bitcast %Tuple* %98 to { %Array*, %Qubit* }* + %100 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 0 + %101 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 1 + %102 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 0) + %104 = bitcast i8* %103 to %Qubit** + store %Qubit* %q3, %Qubit** %104 + store %Array* %102, %Array** %100 + store %Qubit* %q4, %Qubit** %101 + store %Array* %95, %Array** %93 + store { %Array*, %Qubit* }* %99, { %Array*, %Qubit* }** %94 + store %Array* %88, %Array** %86 + store { %Array*, { %Array*, %Qubit* }* }* %92, { %Array*, { %Array*, %Qubit* }* }** %87 + call void @__quantum__rt__callable_invoke(%Callable* %83, %Tuple* %84, %Tuple* null) + %105 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4) + %106 = load %Result*, %Result** @ResultOne + %107 = call i1 @__quantum__rt__result_equal(%Result* %105, %Result* %106) + %108 = xor i1 %107, true + br i1 %108, label %then0__7, label %continue__7 + +then0__7: ; preds = %continue__6 + %109 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @6, i32 0, i32 0)) call void @__quantum__rt__qubit_release(%Qubit* %q4) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %73, i64 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %73, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %77, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %77, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %82, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %89, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %96, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %92, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %85, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) - br label %continue__6 - -continue__6: ; preds = %continue__7, %then0__6 - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %59, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %59, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %64, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %60, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %69, i64 -1) - br label %continue__5 - -continue__5: ; preds = %continue__6, %then0__5 - call void @__quantum__rt__array_update_reference_count(%Array* %45, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %55, i64 -1) - br label %continue__4 - -continue__4: ; preds = %continue__5, %then0__4 - call void @__quantum__rt__array_update_reference_count(%Array* %34, i64 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %30, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %37, i64 -1) - br label %continue__3 - -continue__3: ; preds = %continue__4, %then0__3 - call void @__quantum__rt__array_update_reference_count(%Array* %23, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %26, i64 -1) - br label %continue__2 - -continue__2: ; preds = %continue__3, %then0__2 - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %15, i64 -1) - br label %continue__1 + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%80, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + 
call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %109) + unreachable -continue__1: ; preds = %continue__2, %then0__1 +continue__7: ; preds = %continue__6 + call void @__quantum__rt__qubit_release(%Qubit* %q4) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 
-1) + call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) call void @__quantum__rt__qubit_release(%Qubit* %q1) call void @__quantum__rt__qubit_release(%Qubit* %q2) call void @__quantum__rt__qubit_release(%Qubit* %q3) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - %103 = load i64, i64* %error_code + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) @@ -330,7 +576,7 @@ continue__1: ; preds = %continue__2, %then0 call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - ret i64 %103 + ret void } declare %Tuple* @__quantum__rt__tuple_create(i64) @@ -338,8 +584,8 @@ declare %Tuple* @__quantum__rt__tuple_create(i64) define void @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* - %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 - %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 %3 = load %Qubit*, %Qubit** %1 %4 = load i64, i64* %2 call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %3, i64 %4) @@ -349,8 +595,8 @@ entry: define void @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* - %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 - %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 + %1 = getelementptr 
inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 %3 = load %Qubit*, %Qubit** %1 %4 = load i64, i64* %2 call void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %3, i64 %4) @@ -360,8 +606,8 @@ entry: define void @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* - %1 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i64 0, i32 0 - %2 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1 %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %3, { %Qubit*, i64 }* %4) @@ -371,8 +617,8 @@ entry: define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* - %1 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i64 0, i32 0 - %2 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1 %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 call void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %3, { %Qubit*, i64 }* %4) @@ -384,18 +630,18 @@ declare %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr { %Qubit* }, { %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 %2 = load %Qubit*, %Qubit** %1 %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %4 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %3, i64 0, i32 1 + %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 %5 = load i64, i64* %4 %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* - %8 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i64 0, i32 0 - %9 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i64 0, i32 1 + %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 store %Qubit* %2, %Qubit** %8 store i64 %5, i64* %9 - %10 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %3, i64 0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 %11 = load %Callable*, %Callable** %10 call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) @@ -405,18 +651,18 @@ entry: define void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr { %Qubit* }, { %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 %2 = load %Qubit*, %Qubit** %1 %3 = bitcast 
%Tuple* %capture-tuple to { %Callable*, i64 }* - %4 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %3, i64 0, i32 1 + %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 %5 = load i64, i64* %4 %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* - %8 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i64 0, i32 0 - %9 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i64 0, i32 1 + %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 store %Qubit* %2, %Qubit** %8 store i64 %5, i64* %9 - %10 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %3, i64 0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 %11 = load %Callable*, %Callable** %10 %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 1) @@ -431,26 +677,26 @@ entry: define void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i64 0, i32 0 - %2 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1 %4 = load %Qubit*, %Qubit** %2 %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %6 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %5, i64 0, i32 1 + %6 = getelementptr inbounds { %Callable*, i64 }, { 
%Callable*, i64 }* %5, i32 0, i32 1 %7 = load i64, i64* %6 %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* - %10 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i64 0, i32 0 - %11 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i64 0, i32 1 + %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 store %Qubit* %4, %Qubit** %10 store i64 %7, i64* %11 %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* - %14 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i64 0, i32 0 - %15 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i64 0, i32 1 + %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 store %Array* %3, %Array** %14 store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 - %16 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %5, i64 0, i32 0 + %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 %17 = load %Callable*, %Callable** %16 %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) @@ -466,26 +712,26 @@ entry: define void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* 
%0, i64 0, i32 0 - %2 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 %3 = load %Array*, %Array** %1 %4 = load %Qubit*, %Qubit** %2 %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %6 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %5, i64 0, i32 1 + %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 %7 = load i64, i64* %6 %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* - %10 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i64 0, i32 0 - %11 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i64 0, i32 1 + %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 store %Qubit* %4, %Qubit** %10 store i64 %7, i64* %11 %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* - %14 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i64 0, i32 0 - %15 = getelementptr { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i64 0, i32 1 + %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 store %Array* %3, %Array** %14 store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 - %16 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %5, i64 0, i32 0 + %16 = getelementptr inbounds { 
%Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 %17 = load %Callable*, %Callable** %16 %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) @@ -502,7 +748,7 @@ entry: define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) { entry: %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 %2 = load %Callable*, %Callable** %1 call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) @@ -513,7 +759,7 @@ entry: define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { entry: %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 %2 = load %Callable*, %Callable** %1 call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) @@ -537,7 +783,7 @@ declare %Array* @__quantum__rt__qubit_allocate_array(i64) declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { entry: %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) @@ -548,7 +794,7 @@ entry: %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %3 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) %4 = bitcast i8* %3 to %Qubit** - store %Qubit* %qb, %Qubit** %4 + store %Qubit* %qubit, %Qubit** %4 call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) @@ -560,102 +806,87 @@ entry: declare i1 @__quantum__rt__result_equal(%Result*, %Result*) -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) +declare %String* @__quantum__rt__string_create(i32, i8*) declare void @__quantum__rt__qubit_release(%Qubit*) -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) - declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) -declare void @__quantum__rt__array_update_reference_count(%Array*, i64) - declare void @__quantum__rt__result_update_reference_count(%Result*, i64) -define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { -entry: - %0 = sub i64 %from, %what - ret i64 %0 -} +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) -define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { -entry: - %0 = srem i64 %n, 2 - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %continue__1 +declare void @__quantum__rt__fail(%String*) -then0__1: ; preds = %entry - call void @__quantum__qis__k__body(%Qubit* %q) - br label %continue__1 +declare %Array* @__quantum__rt__array_create_1d(i32, i64) -continue__1: ; preds = %then0__1, %entry - ret void -} +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) -declare void @__quantum__qis__k__body(%Qubit*) +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) -define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { +define void 
@Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() { entry: - %0 = srem i64 %n, 2 - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %continue__1 + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__x__body(%Qubit* %qubit) + %q = call %Qubit* @__quantum__rt__qubit_allocate() + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3 + %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4) + %6 = load %Result*, %Result** @ResultOne + %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6) + br i1 %7, label %then0__1, label %continue__1 then0__1: ; preds = %entry - call void @__quantum__qis__k__body(%Qubit* %q) + call void @__quantum__qis__x__body(%Qubit* %q) br label %continue__1 continue__1: ; preds = %then0__1, %entry - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 - %n = load i64, i64* %2 - %3 = srem i64 %n, 2 - %4 = icmp eq i64 %3, 1 - br i1 %4, label %then0__1, label %continue__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10) + %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q) + %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12) + 
br i1 %13, label %then0__2, label %continue__2 -then0__1: ; preds = %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - call void @__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) - br label %continue__1 +then0__2: ; preds = %continue__1 + %14 = call %String* @__quantum__rt__string_create(i32 29, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @7, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) +continue__2: ; preds = %continue__1 + call void @__quantum__rt__qubit_release(%Qubit* %q) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) ret void } declare void @__quantum__rt__array_update_alias_count(%Array*, i64) -declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { -entry: - call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %1 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i64 0, i32 1 - %n = load i64, i64* %2 - %3 = srem i64 %n, 2 - %4 = icmp eq i64 %3, 1 - br i1 %4, label %then0__1, label %continue__1 +declare void @__quantum__qis__x__body(%Qubit*) -then0__1: ; preds = %entry - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - br label %continue__1 +declare void @__quantum__rt__qubit_release_array(%Array*) -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void +define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { +entry: + %0 = sub i64 %from, %what + ret i64 %0 } define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %compilerDecoy) { @@ -667,59 +898,60 @@ entry: call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 1) call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) %0 = call %Array* @__quantum__rt__array_copy(%Array* %array, i1 false) - %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %index) - %2 = bitcast i8* %1 to i64* - store i64 %val, i64* %2 + %1 = icmp ne %Array* %array, %0 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %index) + %3 = bitcast i8* %2 to i64* + store i64 %val, i64* %3 call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 1) call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 1) store %Array* %0, %Array** %local %n = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %3 = 
sub i64 %n, 1 - %4 = load %Range, %Range* @EmptyRange - %5 = insertvalue %Range %4, i64 %index, 0 - %6 = insertvalue %Range %5, i64 1, 1 - %7 = insertvalue %Range %6, i64 %3, 2 - %slice1 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %7, i1 false) + %4 = sub i64 %n, 1 + %5 = load %Range, %Range* @EmptyRange + %6 = insertvalue %Range %5, i64 %index, 0 + %7 = insertvalue %Range %6, i64 1, 1 + %8 = insertvalue %Range %7, i64 %4, 2 + %slice1 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %8, i1 false) call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 1) - %8 = load %Range, %Range* @EmptyRange - %9 = insertvalue %Range %8, i64 %index, 0 - %10 = insertvalue %Range %9, i64 -2, 1 - %11 = insertvalue %Range %10, i64 0, 2 - %slice2 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %11, i1 false) + %9 = load %Range, %Range* @EmptyRange + %10 = insertvalue %Range %9, i64 %index, 0 + %11 = insertvalue %Range %10, i64 -2, 1 + %12 = insertvalue %Range %11, i64 0, 2 + %slice2 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %12, i1 false) call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 1) %result = call %Array* @__quantum__rt__array_concatenate(%Array* %slice2, %Array* %slice1) call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 1) %sum = alloca i64 store i64 0, i64* %sum - %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %result) - %13 = sub i64 %12, 1 + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %result) + %14 = sub i64 %13, 1 br label %header__1 header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] - %14 = icmp sle i64 %i, %13 - br i1 %14, label %body__1, label %exit__1 + %i = phi i64 [ 0, %entry ], [ %21, %exiting__1 ] + %15 = icmp sle i64 %i, %14 + br i1 %15, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %15 = load i64, i64* %sum - %16 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %result, i64 %i) - %17 = bitcast i8* %16 to i64* - %18 = load i64, i64* %17 - %19 = add i64 %15, %18 - store i64 %19, i64* %sum + %16 = load i64, i64* %sum + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %result, i64 %i) + %18 = bitcast i8* %17 to i64* + %19 = load i64, i64* %18 + %20 = add i64 %16, %19 + store i64 %20, i64* %sum br label %exiting__1 exiting__1: ; preds = %body__1 - %20 = add i64 %i, 1 + %21 = add i64 %i, 1 br label %header__1 exit__1: ; preds = %header__1 br i1 %compilerDecoy, label %then0__1, label %continue__1 then0__1: ; preds = %exit__1 - %res1 = call i64 @Microsoft__Quantum__Testing__QIR__TestControlled__body() + call void @Microsoft__Quantum__Testing__QIR__TestControlled__body() %res2 = call i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 17, i64 42) - %res3 = call i1 @Microsoft__Quantum__Testing__QIR__Test_Qubit_Result_Management__body() + call void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() %res4 = call i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() %res5 = call i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() %res6 = call i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() @@ -727,7 +959,7 @@ then0__1: ; preds = %exit__1 br label %continue__1 continue__1: ; preds = %then0__1, %exit__1 - %21 = load i64, i64* %sum + %22 = load i64, i64* %sum call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 -1) @@ -739,7 +971,7 @@ continue__1: ; preds = %then0__1, %exit__1 call void @__quantum__rt__array_update_reference_count(%Array* %slice2, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %result, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - ret i64 %21 + ret i64 %22 } declare %Array* 
@__quantum__rt__array_copy(%Array*, i1) @@ -754,8 +986,8 @@ define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) entry: %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %1, i64 0, i32 0 - %3 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %1, i64 0, i32 1 + %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) store %Callable* %4, %Callable** %2 store i64 %x, i64* %3 @@ -764,12 +996,12 @@ entry: call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1) %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) %6 = bitcast %Tuple* %5 to { i64 }* - %7 = getelementptr { i64 }, { i64 }* %6, i64 0, i32 0 + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 store i64 %y, i64* %7 %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8) %9 = bitcast %Tuple* %8 to { i64 }* - %10 = getelementptr { i64 }, { i64 }* %9, i64 0, i32 0 + %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0 %11 = load i64, i64* %10 call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1) call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1) @@ -780,45 +1012,6 @@ entry: ret i64 %11 } -define i1 
@Microsoft__Quantum__Testing__QIR__Test_Qubit_Result_Management__body() { -entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %1 = bitcast i8* %0 to %Qubit** - %qb = load %Qubit*, %Qubit** %1 - call void @__quantum__qis__x__body(%Qubit* %qb) - %q = call %Qubit* @__quantum__rt__qubit_allocate() - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %3 = bitcast i8* %2 to %Qubit** - %4 = load %Qubit*, %Qubit** %3 - %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4) - %6 = load %Result*, %Result** @ResultOne - %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6) - br i1 %7, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__x__body(%Qubit* %q) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9 - %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10) - %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q) - %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12) - %14 = xor i1 %13, true - call void @__quantum__rt__qubit_release(%Qubit* %q) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - ret i1 %14 -} - define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() { entry: %0 = call 
double @__quantum__qis__sqrt__body(double 4.000000e+00) @@ -879,8 +1072,8 @@ continue__6: ; preds = %continue__5 define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() { entry: - %d = call double @Microsoft__Quantum__Math__E__body() - %0 = call double @__quantum__qis__log__body(double %d) + %input = call double @Microsoft__Quantum__Math__E__body() + %0 = call double @__quantum__qis__log__body(double %input) %1 = fcmp one double 1.000000e+00, %0 br i1 %1, label %then0__1, label %continue__1 @@ -890,8 +1083,8 @@ then0__1: ; preds = %entry continue__1: ; preds = %entry %2 = call double @Microsoft__Quantum__Math__E__body() %3 = call double @Microsoft__Quantum__Math__E__body() - %d__1 = fmul double %2, %3 - %4 = call double @__quantum__qis__log__body(double %d__1) + %input__1 = fmul double %2, %3 + %4 = call double @__quantum__qis__log__body(double %input__1) %5 = fcmp one double 2.000000e+00, %4 br i1 %5, label %then0__2, label %continue__2 @@ -899,8 +1092,8 @@ then0__2: ; preds = %continue__1 ret i64 2 continue__2: ; preds = %continue__1 - %d__3 = call double @__quantum__qis__log__body(double 0.000000e+00) - %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d__3) + %d = call double @__quantum__qis__log__body(double 0.000000e+00) + %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) %7 = xor i1 %6, true br i1 %7, label %then0__3, label %continue__3 @@ -908,8 +1101,8 @@ then0__3: ; preds = %continue__2 ret i64 3 continue__3: ; preds = %continue__2 - %d__5 = call double @__quantum__qis__log__body(double -5.000000e+00) - %8 = call i1 @__quantum__qis__isnan__body(double %d__5) + %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00) + %8 = call i1 @__quantum__qis__isnan__body(double %d__1) %9 = xor i1 %8, true br i1 %9, label %then0__4, label %continue__4 @@ -917,9 +1110,9 @@ then0__4: ; preds = %continue__3 ret i64 4 continue__4: ; preds = %continue__3 - %d__6 = call double @__quantum__qis__nan__body() - %d__7 = call 
double @__quantum__qis__log__body(double %d__6) - %10 = call i1 @__quantum__qis__isnan__body(double %d__7) + %input__4 = call double @__quantum__qis__nan__body() + %d__2 = call double @__quantum__qis__log__body(double %input__4) + %10 = call i1 @__quantum__qis__isnan__body(double %d__2) %11 = xor i1 %10, true br i1 %11, label %then0__5, label %continue__5 @@ -927,9 +1120,9 @@ then0__5: ; preds = %continue__4 ret i64 5 continue__5: ; preds = %continue__4 - %d__8 = call double @__quantum__qis__infinity__body() - %d__9 = call double @__quantum__qis__log__body(double %d__8) - %12 = call i1 @__quantum__qis__isinf__body(double %d__9) + %input__5 = call double @__quantum__qis__infinity__body() + %d__3 = call double @__quantum__qis__log__body(double %input__5) + %12 = call i1 @__quantum__qis__isinf__body(double %d__3) %13 = xor i1 %12, true br i1 %13, label %then0__6, label %continue__6 @@ -1068,8 +1261,8 @@ continue__12: ; preds = %continue__11 define i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() { entry: - %0 = call %String* @__quantum__rt__string_create(i32 19, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @0, i32 0, i32 0)) - %1 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) + %0 = call %String* @__quantum__rt__string_create(i32 19, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @8, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @9, i32 0, i32 0)) %2 = load i2, i2* @PauliI %3 = call %String* @__quantum__rt__pauli_to_string(i2 %2) %4 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %3) @@ -1085,7 +1278,7 @@ then0__1: ; preds = %entry ret i64 1 continue__1: ; preds = %entry - %7 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__string_create(i32 6, i8* 
getelementptr inbounds ([7 x i8], [7 x i8]* @10, i32 0, i32 0)) %8 = load i2, i2* @PauliX %9 = call %String* @__quantum__rt__pauli_to_string(i2 %8) %10 = call i1 @__quantum__rt__string_equal(%String* %7, %String* %9) @@ -1100,7 +1293,7 @@ then0__2: ; preds = %continue__1 ret i64 2 continue__2: ; preds = %continue__1 - %12 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + %12 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) %13 = load i2, i2* @PauliY %14 = call %String* @__quantum__rt__pauli_to_string(i2 %13) %15 = call i1 @__quantum__rt__string_equal(%String* %12, %String* %14) @@ -1117,7 +1310,7 @@ then0__3: ; preds = %continue__2 ret i64 3 continue__3: ; preds = %continue__2 - %17 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @12, i32 0, i32 0)) %18 = load i2, i2* @PauliZ %19 = call %String* @__quantum__rt__pauli_to_string(i2 %18) %20 = call i1 @__quantum__rt__string_equal(%String* %17, %String* %19) @@ -1147,16 +1340,81 @@ continue__4: ; preds = %continue__3 ret i64 0 } +define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { +entry: + %0 = srem i64 %n, 2 + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__k__body(%Qubit* %q) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +declare void @__quantum__qis__k__body(%Qubit*) + +define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { +entry: + call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { +entry: + 
call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %q = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %n = load i64, i64* %2 + %3 = srem i64 %n, 2 + %4 = icmp eq i64 %3, 1 + br i1 %4, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) + call void @__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) + ret void +} + +declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %q = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %n = load i64, i64* %2 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, i64 }* + %5 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 1 + store %Qubit* %q, %Qubit** %5 + store i64 %n, i64* %6 + call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %__controlQubits__, { %Qubit*, i64 }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) + ret void +} 
+ define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* - %1 = getelementptr { i64, i64 }, { i64, i64 }* %0, i64 0, i32 0 - %2 = getelementptr { i64, i64 }, { i64, i64 }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 %3 = load i64, i64* %1 %4 = load i64, i64* %2 %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) %6 = bitcast %Tuple* %result-tuple to { i64 }* - %7 = getelementptr { i64 }, { i64 }* %6, i64 0, i32 0 + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 store i64 %5, i64* %7 ret void } @@ -1164,18 +1422,18 @@ entry: define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 1 + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 %2 = load i64, i64* %1 %3 = bitcast %Tuple* %arg-tuple to { i64 }* - %4 = getelementptr { i64 }, { i64 }* %3, i64 0, i32 0 + %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 %5 = load i64, i64* %4 %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) %7 = bitcast %Tuple* %6 to { i64, i64 }* - %8 = getelementptr { i64, i64 }, { i64, i64 }* %7, i64 0, i32 0 - %9 = getelementptr { i64, i64 }, { i64, i64 }* %7, i64 0, i32 1 + %8 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 1 store i64 %2, i64* %8 store i64 %5, i64* %9 - %10 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %10 
= getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 %11 = load %Callable*, %Callable** %10 call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) @@ -1185,7 +1443,7 @@ entry: define void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i64 %count-change) { entry: %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 %2 = load %Callable*, %Callable** %1 call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) @@ -1196,7 +1454,7 @@ entry: define void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { entry: %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr { %Callable*, i64 }, { %Callable*, i64 }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 %2 = load %Callable*, %Callable** %1 call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) @@ -1204,55 +1462,55 @@ entry: ret void } -declare void @__quantum__qis__x__body(%Qubit*) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) - -define double @Microsoft__Quantum__Intrinsic__ArcTan2__body(double %y, double %x) { -entry: - %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) - ret double %0 -} - -declare double @__quantum__qis__arctan2__body(double, double) - -define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { +define void 
@Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { entry: - call void @__quantum__qis__k__body(%Qubit* %q) + call void @__quantum__qis__x__body(%Qubit* %qubit) ret void } -define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { entry: - call void @__quantum__qis__k__body(%Qubit* %q) + call void @__quantum__qis__x__body(%Qubit* %qubit) ret void } -define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) ret void } -define double @Microsoft__Quantum__Intrinsic__Sqrt__body(double %d) { +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { entry: - %0 = call double @__quantum__qis__sqrt__body(double %d) - ret double %0 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* 
@__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 } -declare double @__quantum__qis__sqrt__body(double) +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isinf__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isinf__body(double) define double @Microsoft__Quantum__Intrinsic__NAN__body() { entry: @@ -1262,97 +1520,73 @@ entry: declare double @__quantum__qis__nan__body() -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { entry: - call void @__quantum__qis__x__body(%Qubit* %qb) - ret void + %0 = call i1 @__quantum__qis__isnan__body(double %d) + ret i1 %0 } -define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qb) - ret void -} +declare i1 @__quantum__qis__isnan__body(double) -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qb) { +define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void + %0 = call double @__quantum__qis__infinity__body() + ret double %0 } -declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qb) - call 
void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} +declare double @__quantum__qis__infinity__body() -define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { +define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { entry: - %0 = call i1 @__quantum__qis__isnan__body(double %d) + %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) ret i1 %0 } -declare i1 @__quantum__qis__isnan__body(double) +declare i1 @__quantum__qis__isnegativeinfinity__body(double) -define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 + call void @__quantum__qis__k__body(%Qubit* %q) + ret void } -define double @Microsoft__Quantum__Intrinsic__Log__body(double %d) { +define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { entry: - %0 = call double @__quantum__qis__log__body(double %d) - ret double %0 + call void @__quantum__qis__k__body(%Qubit* %q) + ret void } -declare double @__quantum__qis__log__body(double) - -define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { +define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { entry: - %0 = call double @__quantum__qis__infinity__body() - ret double %0 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i64 -1) + ret void } -declare double @__quantum__qis__infinity__body() - -define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { +define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { entry: - %0 = call i1 @__quantum__qis__isinf__body(double %d) - ret i1 %0 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void } -declare i1 @__quantum__qis__isinf__body(double) +declare double @__quantum__qis__arctan2__body(double, double) -define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { +define double @Microsoft__Quantum__Math__PI__body() { entry: - %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) - ret i1 %0 + ret double 0x400921FB54442D18 } -declare i1 @__quantum__qis__isnegativeinfinity__body(double) +declare double @__quantum__qis__sqrt__body(double) define double @Microsoft__Quantum__Math__E__body() { entry: ret double 0x4005BF0A8B145769 } -define double @Microsoft__Quantum__Math__PI__body() { -entry: - ret double 0x400921FB54442D18 -} - -declare %String* @__quantum__rt__string_create(i32, i8*) +declare double @__quantum__qis__log__body(double) declare %String* @__quantum__rt__pauli_to_string(i2) @@ -1362,6 +1596,24 @@ declare void @__quantum__rt__string_update_reference_count(%String*, i64) declare i1 @__quantum__rt__string_equal(%String*, %String*) +define double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +define double @Microsoft__Quantum__Math__Sqrt__body(double %d) { 
+entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays(i64 %array__count, i64* %array, i64 %index, i64 %val, i1 %compilerDecoy) #0 { entry: %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %array__count) @@ -1369,20 +1621,38 @@ entry: br i1 %1, label %copy, label %next copy: ; preds = %entry - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) - %3 = mul i64 %array__count, 8 - call void @llvm.memcpy.p0i8.p0i64.i64(i8* %2, i64* %array, i64 %3, i1 false) - br label %next + %2 = ptrtoint i64* %array to i64 + %3 = sub i64 %array__count, 1 + br label %header__1 -next: ; preds = %copy, %entry +next: ; preds = %exit__1, %entry %4 = call i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %0, i64 %index, i64 %val, i1 %compilerDecoy) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) ret i64 %4 -} -; Function Attrs: argmemonly nounwind willreturn -declare void @llvm.memcpy.p0i8.p0i64.i64(i8* noalias nocapture writeonly, i64* noalias nocapture readonly, i64, i1 immarg) #1 +header__1: ; preds = %exiting__1, %copy + %5 = phi i64 [ 0, %copy ], [ %13, %exiting__1 ] + %6 = icmp sle i64 %5, %3 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = mul i64 %5, 8 + %8 = add i64 %2, %7 + %9 = inttoptr i64 %8 to i64* + %10 = load i64, i64* %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %12 = bitcast i8* %11 to i64* + store i64 %10, i64* %12 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + br label %next +} declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i64) attributes #0 = { "EntryPoint" } -attributes #1 = { argmemonly nounwind willreturn } diff --git a/src/QirRuntime/test/QIR-static/qir-test-functors.qs b/src/QirRuntime/test/QIR-static/qir-test-functors.qs 
deleted file mode 100644 index 15640eb37f2..00000000000 --- a/src/QirRuntime/test/QIR-static/qir-test-functors.qs +++ /dev/null @@ -1,84 +0,0 @@ - -// For the test to pass implement K to be the same as X. We need it for the test, because the standard bridge doesn't -// support multi-controlled X. -namespace Microsoft.Quantum.Intrinsic -{ - @Inline() - operation K(q : Qubit) : Unit - is Adj+Ctl { - body intrinsic; - adjoint self; - controlled intrinsic; - } -} - -namespace Microsoft.Quantum.Testing.QIR -{ - open Microsoft.Quantum.Intrinsic; - - operation Qop(q : Qubit, n : Int) : Unit - is Adj+Ctl { - body (...) - { - if (n%2 == 1) { K(q); } - } - adjoint (...) // self, but have to define explicitly due to https://github.com/microsoft/qsharp-compiler/issues/781 - { - if (n%2 == 1) { K(q); } - } - controlled (ctrls, ...) - { - if (n%2 == 1) { Controlled K(ctrls, q); } - } - } - - operation TestControlled () : Int - { - let qop = Qop(_, 1); - let adj_qop = Adjoint qop; - let ctl_qop = Controlled qop; - let adj_ctl_qop = Adjoint Controlled qop; - let ctl_ctl_qop = Controlled ctl_qop; - - mutable error_code = 0; - using ((q1, q2, q3) = (Qubit(), Qubit(), Qubit())) - { - qop(q1); - if (M(q1) != One) { set error_code = 1; } - else - { - adj_qop(q2); - if (M(q2) != One) { set error_code = 2; } - else{ - ctl_qop([q1], q3); - if (M(q3) != One) { set error_code = 3; } - else - { - adj_ctl_qop([q2], q3); - if (M(q3) != Zero) { set error_code = 4; } - else - { - ctl_ctl_qop([q1], ([q2], q3)); - if (M(q3) != One) { set error_code = 5; } - else - { - Controlled qop([q1, q2], q3); - if (M(q3) != Zero) { set error_code = 6; } - else - { - using (q4 = Qubit()) - { - Adjoint qop(q3); - Adjoint Controlled ctl_ctl_qop([q1], ([q2], ([q3], q4))); - if (M(q4) != One) { set error_code = 7; } - } - } - } - } - } - } - } - } - return error_code; - } -} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-static/qsharp/Math.qs b/src/QirRuntime/test/QIR-static/qsharp/Math.qs new 
file mode 100644 index 00000000000..12e98bd5458 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qsharp/Math.qs @@ -0,0 +1,31 @@ +namespace Microsoft.Quantum.Intrinsic { + + open Microsoft.Quantum.Targeting; + open Microsoft.Quantum.Core; + + @Inline() + function NAN() : Double { + body intrinsic; + } + + @Inline() + function IsNan(d: Double) : Bool { + body intrinsic; + } + + @Inline() + function INFINITY() : Double { + body intrinsic; + } + + @Inline() + function IsInf(d: Double) : Bool { + body intrinsic; + } + + @Inline() + function IsNegativeInfinity(d : Double) : Bool { + body intrinsic; + } + +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj new file mode 100644 index 00000000000..8f4f360a7bb --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj @@ -0,0 +1,9 @@ + + + + Exe + netcoreapp3.1 + True + + + diff --git a/src/QirRuntime/test/QIR-static/qir-test-arrays.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs similarity index 86% rename from src/QirRuntime/test/QIR-static/qir-test-arrays.qs rename to src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs index 65d05ca8ef2..f46e7a06bc4 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-arrays.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs @@ -1,14 +1,12 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-namespace Microsoft.Quantum.Testing.QIR -{ +namespace Microsoft.Quantum.Testing.QIR { open Microsoft.Quantum.Testing.QIR.Math; open Microsoft.Quantum.Testing.QIR.Str; @EntryPoint() - operation Test_Arrays(array : Int[], index : Int, val : Int, compilerDecoy : Bool) : Int - { + operation Test_Arrays(array : Int[], index : Int, val : Int, compilerDecoy : Bool) : Int { // exercise __quantum__rt__array_copy mutable local = array; @@ -27,17 +25,15 @@ namespace Microsoft.Quantum.Testing.QIR // return a value that is likely to be correct only if the above operations did what was expected mutable sum = 0; - for (i in 0..Length(result)-1) - { + for i in 0..Length(result)-1 { set sum += result[i]; } // The purpose of this block is to keep the Q# compiler from optimizing away other tests when generating QIR - if (compilerDecoy) - { + if (compilerDecoy) { let res1 = TestControlled(); let res2 = TestPartials(17, 42); - let res3 = Test_Qubit_Result_Management(); + TestQubitResultManagement(); // Math tests: let res4 = SqrtTest(); @@ -45,8 +41,6 @@ namespace Microsoft.Quantum.Testing.QIR let res6 = ArcTan2Test(); let res7 = PauliToStringTest(); } - return sum; } - } diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs new file mode 100644 index 00000000000..aa6a91f200b --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs @@ -0,0 +1,66 @@ + +// For the test to pass implement K to be the same as X. We need it for the test, because the standard bridge doesn't +// support multi-controlled X. +namespace Microsoft.Quantum.Intrinsic { + open Microsoft.Quantum.Core; + @Inline() + operation K(q : Qubit) : Unit + is Adj+Ctl { + body intrinsic; + adjoint self; + controlled intrinsic; + } +} + +namespace Microsoft.Quantum.Testing.QIR { + open Microsoft.Quantum.Intrinsic; + + operation Qop(q : Qubit, n : Int) : Unit + is Adj+Ctl { + body (...) 
{ + if (n%2 == 1) { K(q); } + } + adjoint self; + controlled (ctrls, ...) { + if (n%2 == 1) { Controlled K(ctrls, q); } + } + } + + // We want to test for conditional measurements which requires us to generate QIR with --runtime set to + // BasicMeasurementFeedback, which in turn doesn't allow updating mutables inside measurement conditionals. + // this means, we cannot easily get detailed failure information back from Q#, but the test driver can mock + // the simulator to track the point of failure. + operation TestControlled() : Unit { + let qop = Qop(_, 1); + let adj_qop = Adjoint qop; + let ctl_qop = Controlled qop; + let adj_ctl_qop = Adjoint Controlled qop; + let ctl_ctl_qop = Controlled ctl_qop; + + use (q1, q2, q3) = (Qubit(), Qubit(), Qubit()) { + qop(q1); + if (M(q1) != One) { fail("error code: 1"); } + + adj_qop(q2); + if (M(q2) != One) { fail("error code: 2"); } + + ctl_qop([q1], q3); + if (M(q3) != One) { fail("error code: 3"); } + + adj_ctl_qop([q2], q3); + if (M(q3) != Zero) { fail("error code: 2"); } + + ctl_ctl_qop([q1], ([q2], q3)); + if (M(q3) != One) { fail("error code: 5"); } + + Controlled qop([q1, q2], q3); + if (M(q3) != Zero) { fail("error code: 6"); } + + use q4 = Qubit() { + Adjoint qop(q3); + Adjoint Controlled ctl_ctl_qop([q1], ([q2], ([q3], q4))); + if (M(q4) != One) { fail("error code: 7"); } + } + } + } +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-static/qir-test-math.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs similarity index 100% rename from src/QirRuntime/test/QIR-static/qir-test-math.qs rename to src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs diff --git a/src/QirRuntime/test/QIR-static/qir-test-partials.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs similarity index 55% rename from src/QirRuntime/test/QIR-static/qir-test-partials.qs rename to src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs index 158573d4d91..837f09f6286 100644 --- 
a/src/QirRuntime/test/QIR-static/qir-test-partials.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs @@ -1,15 +1,12 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -namespace Microsoft.Quantum.Testing.QIR -{ - function Subtract(from : Int, what : Int) : Int - { +namespace Microsoft.Quantum.Testing.QIR { + function Subtract(from : Int, what : Int) : Int { return from - what; } - function TestPartials(x : Int, y : Int) : Int - { + function TestPartials(x : Int, y : Int) : Int { let subtractor = Subtract(x, _); return subtractor(y); } diff --git a/src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-qubits-results.qs similarity index 60% rename from src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs rename to src/QirRuntime/test/QIR-static/qsharp/qir-test-qubits-results.qs index 9c4f2e5493f..dc6c74fb9ee 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-qubits-results.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-qubits-results.qs @@ -1,21 +1,17 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-namespace Microsoft.Quantum.Testing.QIR -{ +namespace Microsoft.Quantum.Testing.QIR { open Microsoft.Quantum.Intrinsic; - operation Test_Qubit_Result_Management() : Bool - { + operation TestQubitResultManagement() : Unit { // exercise __quantum__rt__qubit_allocate_array - using(qs = Qubit[2]) - { + use qs = Qubit[2] { X(qs[1]); // exercise __quantum__rt__qubit_allocate - using (q = Qubit()) - { + use q = Qubit() { // exercise __quantum__rt__result_equal and accessing result constants - if (M(qs[1]) == One) {X(q);} - return M(qs[0]) != M(q); + if (M(qs[1]) == One) { X(q); } + if (M(qs[0]) == M(q)) { fail("Unexpected measurement result"); } } // exercise __quantum__rt__qubit_release } // exercise __quantum__rt__qubit_release_array } diff --git a/src/QirRuntime/test/QIR-static/qir-test-strings.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-strings.qs similarity index 100% rename from src/QirRuntime/test/QIR-static/qir-test-strings.qs rename to src/QirRuntime/test/QIR-static/qsharp/qir-test-strings.qs From 8d71bc2f148172cbca14ba27a8ea7285bea97c37 Mon Sep 17 00:00:00 2001 From: Sarah Marshall <33814365+samarsha@users.noreply.github.com> Date: Fri, 12 Feb 2021 16:44:43 -0800 Subject: [PATCH 06/30] Update C# generation for access modifier changes in Q# compiler (#481) * Use new Visibility compiler API * Update C# gen and fix package versions * Update naming --- .../Microsoft.Quantum.CSharpGeneration.fsproj | 4 ++-- .../CSharpGeneration/SimulationCode.fs | 16 ++++++++-------- ...tum.Simulation.QCTraceSimulatorRuntime.csproj | 2 +- .../Microsoft.Quantum.QSharp.Core.csproj | 3 +-- .../Microsoft.Quantum.QSharp.Foundation.csproj | 2 +- .../HoneywellExe/HoneywellExe.csproj | 2 +- .../IntrinsicTests/IntrinsicTests.csproj | 2 +- .../TestProjects/IonQExe/IonQExe.csproj | 2 +- .../Library with Spaces.csproj | 2 +- .../TestProjects/Library1/Library1.csproj | 2 +- .../TestProjects/Library2/Library2.csproj | 2 +- .../TestProjects/QCIExe/QCIExe.csproj | 2 +- 
.../TestProjects/QSharpExe/QSharpExe.csproj | 2 +- .../TestProjects/TargetedExe/TargetedExe.csproj | 2 +- .../TestProjects/UnitTests/UnitTests.csproj | 2 +- .../Tests.Microsoft.Quantum.Simulators.csproj | 2 +- ...sts.Microsoft.Quantum.Simulators.Type1.csproj | 2 +- ...sts.Microsoft.Quantum.Simulators.Type2.csproj | 4 +--- .../Microsoft.Quantum.Simulators.csproj | 3 ++- .../Microsoft.Quantum.Type1.Core.csproj | 2 +- .../Microsoft.Quantum.Type2.Core.csproj | 2 +- src/Xunit/Microsoft.Quantum.Xunit.nuspec | 2 +- 22 files changed, 31 insertions(+), 33 deletions(-) diff --git a/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj b/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj index 2dc489c798d..d387f0b4215 100644 --- a/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj +++ b/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj @@ -1,4 +1,4 @@ - + @@ -22,7 +22,7 @@ - + diff --git a/src/Simulation/CSharpGeneration/SimulationCode.fs b/src/Simulation/CSharpGeneration/SimulationCode.fs index 6b4029c0030..f8fa92a5e65 100644 --- a/src/Simulation/CSharpGeneration/SimulationCode.fs +++ b/src/Simulation/CSharpGeneration/SimulationCode.fs @@ -946,16 +946,16 @@ module SimulationCode = /// a Property that returns an instance of the operation by calling the /// IOperationFactory let buildOpsProperties context (operations : QsQualifiedName list): MemberDeclarationSyntax list = - let getCallableAccessModifier qualifiedName = + let getCallableAccess qualifiedName = match context.allCallables.TryGetValue qualifiedName with - | true, callable -> Some callable.Modifiers.Access + | true, callable -> Some callable.Access | false, _ -> None let getPropertyModifiers qualifiedName = // Use the right accessibility for the property depending on the accessibility of the callable. // Note: In C#, "private protected" is the intersection of protected and internal. 
- match getCallableAccessModifier qualifiedName |> Option.defaultValue DefaultAccess with - | DefaultAccess -> [ ``protected`` ] + match getCallableAccess qualifiedName |> Option.defaultValue Public with + | Public -> [ ``protected`` ] | Internal -> [ ``private``; ``protected`` ] let buildOne qualifiedName = @@ -1439,8 +1439,8 @@ module SimulationCode = (constructors @ properties @ methods) ``}`` - let private classAccessModifier = function - | DefaultAccess -> ``public`` + let private classAccess = function + | Public -> ``public`` | Internal -> ``internal`` // Builds the .NET class for the given operation. @@ -1509,7 +1509,7 @@ module SimulationCode = let methods = [ opNames |> buildInit context; inData |> fst; outData |> fst; buildRun context nonGenericName op.ArgumentTuple op.Signature.ArgumentType op.Signature.ReturnType ] let modifiers = - let access = classAccessModifier op.Modifiers.Access + let access = classAccess op.Access if opIsIntrinsic && not isConcreteIntrinsic then [ access; ``abstract``; ``partial`` ] else @@ -1604,7 +1604,7 @@ module SimulationCode = let baseClassName = udtBaseClassName context qsharpType let baseClass = ``simpleBase`` baseClassName - let modifiers = [ classAccessModifier udt.Modifiers.Access ] + let modifiers = [ classAccess udt.Access ] let interfaces = [ ``simpleBase`` "IApplyData" ] let constructors = [ buildEmptyConstructor; buildBaseTupleConstructor ] let qubitsField = buildQubitsField context qsharpType diff --git a/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj b/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj index 7e8e61d22f6..e6c3aa13131 100644 --- a/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj +++ b/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj @@ -1,4 +1,4 @@ - + diff --git 
a/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj b/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj index a80ac901a6b..bee12356075 100644 --- a/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj +++ b/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj @@ -1,5 +1,4 @@ - - + diff --git a/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj b/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj index 69118c12e1e..3dd5111a716 100644 --- a/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj +++ b/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj index 5384a2f6901..dba30bd5d0e 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj b/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj index 9075b363ba0..76774ef34c7 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.1 diff --git a/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj index e0b14d1bfa7..8dc981e3a13 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with 
Spaces.csproj b/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj index 556eda7eaad..08b73178a78 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 false diff --git a/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj b/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj index 2479cfcbd73..367d19a5ee2 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 diff --git a/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj b/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj index 2479cfcbd73..367d19a5ee2 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 diff --git a/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj index 6badef1cce5..e918999b811 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj index b6ff9d91069..ebcc391078d 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj 
b/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj index 91bf1cd73be..0e3dece3b64 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj b/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj index bd33c8fe071..744de7e54c1 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.1 diff --git a/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj b/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj index 497ff79d40a..945ad953a4f 100644 --- a/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj +++ b/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj b/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj index 9e9d51b6c22..ffeea476545 100644 --- a/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj +++ b/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj b/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj index 8b0e1a36091..0493ce6298f 100644 --- a/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj +++ b/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj @@ -1,4 +1,4 @@ - + @@ -19,5 +19,3 @@ - - diff --git 
a/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj b/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj index df891ab5eb0..ee170f9087f 100644 --- a/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj +++ b/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj @@ -1,4 +1,5 @@ - + + diff --git a/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj b/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj index f21cd835b30..7daac307ae6 100644 --- a/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj +++ b/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj b/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj index 9bec9e8d8f2..2a066c3ae25 100644 --- a/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj +++ b/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Xunit/Microsoft.Quantum.Xunit.nuspec b/src/Xunit/Microsoft.Quantum.Xunit.nuspec index 44fcb9344a9..52887b4328b 100644 --- a/src/Xunit/Microsoft.Quantum.Xunit.nuspec +++ b/src/Xunit/Microsoft.Quantum.Xunit.nuspec @@ -22,7 +22,7 @@ - + From 19039f61c3c88115297d2c81a612de58e37048c9 Mon Sep 17 00:00:00 2001 From: "Stefan J. 
Wernli" Date: Wed, 17 Feb 2021 00:06:10 -0800 Subject: [PATCH 07/30] Generate QIR during pipeline (#522) * Use xplat qir generation in build pipeline * Build pipeline should use pwsh * Update for CR feedback --- .gitignore | 2 +- build/steps.yml | 6 +- src/QirRuntime/build-qir-runtime.ps1 | 11 + src/QirRuntime/test/QIR-static/qir-gen.ll | 1658 ----------------- .../test/QIR-static/qsharp/qir-gen.csproj | 2 +- 5 files changed, 16 insertions(+), 1663 deletions(-) delete mode 100644 src/QirRuntime/test/QIR-static/qir-gen.ll diff --git a/.gitignore b/.gitignore index 7ada7136a29..f77b7e72b01 100644 --- a/.gitignore +++ b/.gitignore @@ -24,7 +24,7 @@ bld/ [Oo]bj/ [Ll]og/ [Dd]rops/ -**/qir/qir-gen.ll +**/qir-gen.ll # Visual Studio 2015/2017 cache/options directory .vs/ diff --git a/build/steps.yml b/build/steps.yml index 89e377bfb6e..77054f58a30 100644 --- a/build/steps.yml +++ b/build/steps.yml @@ -5,18 +5,18 @@ steps: - template: steps-init.yml -- powershell: ./build.ps1 +- pwsh: ./build.ps1 displayName: "Building Q# runtime" workingDirectory: $(System.DefaultWorkingDirectory)/build -- powershell: ./test.ps1 +- pwsh: ./test.ps1 displayName: "Testing Q# runtime" workingDirectory: $(System.DefaultWorkingDirectory)/build condition: and(succeeded(), ne(variables['Skip.Tests'], 'true')) -- powershell: ./pack.ps1 +- pwsh: ./pack.ps1 displayName: "Pack Q# runtime (Windows only)" condition: and(succeeded(), eq(variables['Agent.OS'], 'Windows_NT')) workingDirectory: $(System.DefaultWorkingDirectory)/build diff --git a/src/QirRuntime/build-qir-runtime.ps1 b/src/QirRuntime/build-qir-runtime.ps1 index ea0654513e8..63247ebaab7 100644 --- a/src/QirRuntime/build-qir-runtime.ps1 +++ b/src/QirRuntime/build-qir-runtime.ps1 @@ -2,6 +2,17 @@ # Licensed under the MIT License. 
if ($Env:ENABLE_QIRRUNTIME -eq "true") { + Write-Host "##[info]Compile Q# Projects into QIR" + $qirStaticPath = Join-Path $PSScriptRoot test QIR-static qsharp + dotnet build $qirStaticPath -c $Env:BUILD_CONFIGURATION -v $Env:BUILD_VERBOSITY + if ($LastExitCode -ne 0) { + Write-Host "##vso[task.logissue type=error;]Failed to compile Q# project at '$qirStaticPath' into QIR." + return + } + Copy-Item -Path (Join-Path $qirStaticPath qir *.ll) -Destination (Split-Path $qirStaticPath -Parent) + # Also copy to drops so it ends up in build artifacts, for easier post-build debugging. + Copy-Item -Path (Join-Path $qirStaticPath qir *.ll) -Destination $Env:DROPS_DIR + Write-Host "##[info]Build QIR Runtime" $oldCC = $env:CC $oldCXX = $env:CXX diff --git a/src/QirRuntime/test/QIR-static/qir-gen.ll b/src/QirRuntime/test/QIR-static/qir-gen.ll deleted file mode 100644 index 4a41d3df9e7..00000000000 --- a/src/QirRuntime/test/QIR-static/qir-gen.ll +++ /dev/null @@ -1,1658 +0,0 @@ - -%Result = type opaque -%Range = type { i64, i64, i64 } -%Tuple = type opaque -%Callable = type opaque -%Qubit = type opaque -%String = type opaque -%Array = type opaque - -@ResultZero = external global %Result* -@ResultOne = external global %Result* -@PauliI = constant i2 0 -@PauliX = constant i2 1 -@PauliY = constant i2 -1 -@PauliZ = constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] -@PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] -@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] -@0 = internal constant [14 x i8] c"error code: 1\00" -@1 = internal constant [14 x i8] c"error code: 2\00" -@2 = internal constant [14 x i8] c"error code: 3\00" -@3 = internal constant [14 x i8] c"error code: 2\00" -@4 = internal constant [14 x i8] c"error code: 5\00" -@5 = internal constant [14 x i8] c"error code: 6\00" -@6 = internal constant [14 x i8] c"error code: 7\00" -@7 = internal constant [30 x i8] c"Unexpected measurement result\00" -@Microsoft__Quantum__Testing__QIR__Subtract = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] -@8 = internal constant [20 x i8] c"Pauli value: PauliI\00" -@9 = internal constant [14 x i8] c"Pauli value: \00" -@10 = internal constant [7 x i8] c"PauliX\00" -@11 = internal constant [7 x i8] c"PauliY\00" -@12 = internal constant [7 x i8] c"PauliZ\00" - -define void @Microsoft__Quantum__Testing__QIR__TestControlled__body() { 
-entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 - %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Qop, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) - store %Callable* %4, %Callable** %2 - store i64 1, i64* %3 - %qop = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %0) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 1) - %adj_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 1) - %ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 1) - %adj_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, 
%Callable* %adj_ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %adj_ctl_qop) - call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 1) - %ctl_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 1) - %q1 = call %Qubit* @__quantum__rt__qubit_allocate() - %q2 = call %Qubit* @__quantum__rt__qubit_allocate() - %q3 = call %Qubit* @__quantum__rt__qubit_allocate() - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %6 = bitcast %Tuple* %5 to { %Qubit* }* - %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 - store %Qubit* %q1, %Qubit** %7 - call void @__quantum__rt__callable_invoke(%Callable* %qop, %Tuple* %5, %Tuple* null) - %8 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1) - %9 = load %Result*, %Result** @ResultOne - %10 = call i1 @__quantum__rt__result_equal(%Result* %8, %Result* %9) - %11 = xor i1 %10, true - br i1 %11, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - %12 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @0, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) 
- call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %12) - unreachable - -continue__1: ; preds = %entry - %13 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %14 = bitcast %Tuple* %13 to { %Qubit* }* - %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 - store %Qubit* %q2, %Qubit** %15 - call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %13, %Tuple* null) - %16 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) - %17 = load %Result*, %Result** @ResultOne - %18 = call i1 @__quantum__rt__result_equal(%Result* %16, %Result* %17) - %19 = xor i1 %18, true - br i1 %19, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - %20 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %20) - unreachable - -continue__2: ; preds = %continue__1 - %21 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %22 = bitcast %Tuple* %21 to { %Array*, %Qubit* }* - %23 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 0 - %24 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 1 - %25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) - %27 = bitcast i8* %26 to %Qubit** - store %Qubit* %q1, %Qubit** %27 - store %Array* %25, %Array** %23 - store %Qubit* %q3, %Qubit** %24 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %21, %Tuple* null) - %28 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %29 = load 
%Result*, %Result** @ResultOne - %30 = call i1 @__quantum__rt__result_equal(%Result* %28, %Result* %29) - %31 = xor i1 %30, true - br i1 %31, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - %32 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %32) - unreachable - -continue__3: ; preds = %continue__2 - %33 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %34 = bitcast %Tuple* %33 to { %Array*, %Qubit* }* - %35 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 0 - %36 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 1 - %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) - %39 = bitcast i8* %38 to %Qubit** - store %Qubit* %q2, %Qubit** %39 - store %Array* %37, %Array** %35 - store %Qubit* %q3, %Qubit** %36 - call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, %Tuple* %33, %Tuple* null) - %40 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %41 = load %Result*, %Result** @ResultZero - %42 = call i1 @__quantum__rt__result_equal(%Result* %40, %Result* %41) - %43 = xor i1 %42, true - br i1 %43, label %then0__4, 
label %continue__4 - -then0__4: ; preds = %continue__3 - %44 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @3, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void 
@__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %44) - unreachable - -continue__4: ; preds = %continue__3 - %45 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %46 = bitcast %Tuple* %45 to { %Array*, { %Array*, %Qubit* }* }* - %47 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 0 - %48 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 1 - %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) - %51 = bitcast i8* %50 to %Qubit** - store %Qubit* %q1, %Qubit** %51 - %52 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %53 = bitcast %Tuple* %52 to { %Array*, %Qubit* }* - %54 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* 
}* %53, i32 0, i32 0 - %55 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 1 - %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) - %58 = bitcast i8* %57 to %Qubit** - store %Qubit* %q2, %Qubit** %58 - store %Array* %56, %Array** %54 - store %Qubit* %q3, %Qubit** %55 - store %Array* %49, %Array** %47 - store { %Array*, %Qubit* }* %53, { %Array*, %Qubit* }** %48 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %45, %Tuple* null) - %59 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %60 = load %Result*, %Result** @ResultOne - %61 = call i1 @__quantum__rt__result_equal(%Result* %59, %Result* %60) - %62 = xor i1 %61, true - br i1 %62, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - %63 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @4, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %63) - unreachable - -continue__5: ; preds = %continue__4 - %64 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %64) - %65 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %66 = bitcast %Tuple* %65 to { %Array*, %Qubit* }* - %67 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 0 - %68 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 1 - %69 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) - %71 = bitcast i8* %70 to %Qubit** - %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) - %73 = bitcast i8* %72 to %Qubit** - store %Qubit* %q1, %Qubit** %71 - store %Qubit* %q2, %Qubit** %73 - store %Array* %69, %Array** %67 - store %Qubit* %q3, %Qubit** %68 - call void @__quantum__rt__callable_invoke(%Callable* %64, %Tuple* %65, %Tuple* null) - %74 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %75 = load %Result*, %Result** @ResultZero - %76 = call i1 @__quantum__rt__result_equal(%Result* %74, %Result* %75) - %77 = xor i1 %76, true - br i1 %77, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - %78 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @5, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* 
%q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call 
void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %78) - unreachable - -continue__6: ; preds = %continue__5 - %q4 = call %Qubit* @__quantum__rt__qubit_allocate() - %79 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %79) - %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %81 = bitcast %Tuple* %80 to { %Qubit* }* - %82 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %81, i32 
0, i32 0 - store %Qubit* %q3, %Qubit** %82 - call void @__quantum__rt__callable_invoke(%Callable* %79, %Tuple* %80, %Tuple* null) - %83 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %83) - call void @__quantum__rt__callable_make_adjoint(%Callable* %83) - %84 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %85 = bitcast %Tuple* %84 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* - %86 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 0 - %87 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 1 - %88 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) - %90 = bitcast i8* %89 to %Qubit** - store %Qubit* %q1, %Qubit** %90 - %91 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %92 = bitcast %Tuple* %91 to { %Array*, { %Array*, %Qubit* }* }* - %93 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 0 - %94 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 1 - %95 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0) - %97 = bitcast i8* %96 to %Qubit** - store %Qubit* %q2, %Qubit** %97 - %98 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %99 = bitcast %Tuple* %98 to 
{ %Array*, %Qubit* }* - %100 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 0 - %101 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 1 - %102 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 0) - %104 = bitcast i8* %103 to %Qubit** - store %Qubit* %q3, %Qubit** %104 - store %Array* %102, %Array** %100 - store %Qubit* %q4, %Qubit** %101 - store %Array* %95, %Array** %93 - store { %Array*, %Qubit* }* %99, { %Array*, %Qubit* }** %94 - store %Array* %88, %Array** %86 - store { %Array*, { %Array*, %Qubit* }* }* %92, { %Array*, { %Array*, %Qubit* }* }** %87 - call void @__quantum__rt__callable_invoke(%Callable* %83, %Tuple* %84, %Tuple* null) - %105 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4) - %106 = load %Result*, %Result** @ResultOne - %107 = call i1 @__quantum__rt__result_equal(%Result* %105, %Result* %106) - %108 = xor i1 %107, true - br i1 %108, label %then0__7, label %continue__7 - -then0__7: ; preds = %continue__6 - %109 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @6, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q4) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call 
void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %109) - unreachable - -continue__7: ; preds = %continue__6 - call void 
@__quantum__rt__qubit_release(%Qubit* %q4) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void 
@__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - ret void -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -define void @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %3 = load %Qubit*, %Qubit** %1 - %4 = load i64, i64* %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %3, i64 %4) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %3 = load %Qubit*, %Qubit** %1 - %4 = load i64, i64* %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %3, i64 %4) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* - %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 
}* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %3, { %Qubit*, i64 }* %4) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* - %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %3, { %Qubit*, i64 }* %4) - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i64)*]*, %Tuple*) - -define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1 - %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 - %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* - %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 - store %Qubit* %2, %Qubit** %8 - store 
i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 - %11 = load %Callable*, %Callable** %10 - call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) - ret void -} - -define void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1 - %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 - %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* - %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 - store %Qubit* %2, %Qubit** %8 - store i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 - %11 = load %Callable*, %Callable** %10 - %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %12) - call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i64 -1) - ret void -} - -define void 
@Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load %Qubit*, %Qubit** %2 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 - %7 = load i64, i64* %6 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* - %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 - store %Qubit* %4, %Qubit** %10 - store i64 %7, i64* %11 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* - %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14 - store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 - %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* 
%result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) - ret void -} - -define void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load %Qubit*, %Qubit** %2 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 - %7 = load i64, i64* %6 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* - %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 - store %Qubit* %4, %Qubit** %10 - store i64 %7, i64* %11 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* - %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14 - store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 - %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, 
i32 0 - %17 = load %Callable*, %Callable** %16 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) - ret void -} - -define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -declare void 
@__quantum__rt__callable_memory_management(i32, %Callable*, i64) - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i64) - -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) - -declare void @__quantum__rt__callable_make_adjoint(%Callable*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare %Qubit* @__quantum__rt__qubit_allocate() - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { -entry: - %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) - %1 = bitcast i8* %0 to i2* - %2 = load i2, i2* @PauliZ - store i2 %2, i2* %1 - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) - %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) - %4 = bitcast i8* %3 to %Qubit** - store %Qubit* %qubit, %Qubit** %4 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i64 -1) - ret %Result* %5 -} - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) - -declare %String* @__quantum__rt__string_create(i32, i8*) - -declare void @__quantum__rt__qubit_release(%Qubit*) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) - -declare void @__quantum__rt__result_update_reference_count(%Result*, i64) - -declare void 
@__quantum__rt__callable_update_reference_count(%Callable*, i64) - -declare void @__quantum__rt__fail(%String*) - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i64) - -define void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() { -entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %1 = bitcast i8* %0 to %Qubit** - %qubit = load %Qubit*, %Qubit** %1 - call void @__quantum__qis__x__body(%Qubit* %qubit) - %q = call %Qubit* @__quantum__rt__qubit_allocate() - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %3 = bitcast i8* %2 to %Qubit** - %4 = load %Qubit*, %Qubit** %3 - %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4) - %6 = load %Result*, %Result** @ResultOne - %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6) - br i1 %7, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__x__body(%Qubit* %q) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9 - %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10) - %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q) - %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12) - br i1 %13, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - %14 = call %String* @__quantum__rt__string_create(i32 29, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @7, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q) - call void 
@__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - call void @__quantum__rt__fail(%String* %14) - unreachable - -continue__2: ; preds = %continue__1 - call void @__quantum__rt__qubit_release(%Qubit* %q) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - ret void -} - -declare void @__quantum__rt__array_update_alias_count(%Array*, i64) - -declare void @__quantum__qis__x__body(%Qubit*) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { -entry: - %0 = sub i64 %from, %what - ret i64 %0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %compilerDecoy) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) - %local = alloca %Array* - store %Array* %array, %Array** %local - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) - call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) - %0 = call %Array* @__quantum__rt__array_copy(%Array* %array, i1 false) - %1 = icmp ne %Array* %array, %0 - 
%2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %index) - %3 = bitcast i8* %2 to i64* - store i64 %val, i64* %3 - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 1) - store %Array* %0, %Array** %local - %n = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %4 = sub i64 %n, 1 - %5 = load %Range, %Range* @EmptyRange - %6 = insertvalue %Range %5, i64 %index, 0 - %7 = insertvalue %Range %6, i64 1, 1 - %8 = insertvalue %Range %7, i64 %4, 2 - %slice1 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %8, i1 false) - call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 1) - %9 = load %Range, %Range* @EmptyRange - %10 = insertvalue %Range %9, i64 %index, 0 - %11 = insertvalue %Range %10, i64 -2, 1 - %12 = insertvalue %Range %11, i64 0, 2 - %slice2 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %12, i1 false) - call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 1) - %result = call %Array* @__quantum__rt__array_concatenate(%Array* %slice2, %Array* %slice1) - call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 1) - %sum = alloca i64 - store i64 0, i64* %sum - %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %result) - %14 = sub i64 %13, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %21, %exiting__1 ] - %15 = icmp sle i64 %i, %14 - br i1 %15, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %16 = load i64, i64* %sum - %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %result, i64 %i) - %18 = bitcast i8* %17 to i64* - %19 = load i64, i64* %18 - %20 = add i64 %16, %19 - store i64 %20, i64* %sum - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %21 = add i64 %i, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - br i1 %compilerDecoy, label %then0__1, label 
%continue__1 - -then0__1: ; preds = %exit__1 - call void @Microsoft__Quantum__Testing__QIR__TestControlled__body() - %res2 = call i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 17, i64 42) - call void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() - %res4 = call i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() - %res5 = call i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() - %res6 = call i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() - %res7 = call i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() - br label %continue__1 - -continue__1: ; preds = %then0__1, %exit__1 - %22 = load i64, i64* %sum - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %slice1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %slice2, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %result, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - ret i64 %22 -} - -declare %Array* @__quantum__rt__array_copy(%Array*, i1) - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) - -declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) - -define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint 
({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 - %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) - store %Callable* %4, %Callable** %2 - store i64 %x, i64* %3 - %subtractor = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %0) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) - %6 = bitcast %Tuple* %5 to { i64 }* - %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 - store i64 %y, i64* %7 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) - call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8) - %9 = bitcast %Tuple* %8 to { i64 }* - %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0 - %11 = load i64, i64* %10 - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %subtractor, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %subtractor, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) - ret i64 %11 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() { -entry: - %0 = call double @__quantum__qis__sqrt__body(double 4.000000e+00) - %1 = fcmp one double 2.000000e+00, %0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - ret i64 1 - -continue__1: ; preds = %entry - %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00) - %3 = fcmp one double 3.000000e+00, %2 - br i1 %3, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - ret i64 2 - -continue__2: ; preds = %continue__1 - %4 = call double @__quantum__qis__sqrt__body(double 1.000000e+02) - %5 = fcmp one double 1.000000e+01, %4 - br i1 %5, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - ret i64 3 - -continue__3: ; preds = %continue__2 - %d__4 = call double @__quantum__qis__sqrt__body(double -5.000000e+00) - %6 = call i1 @__quantum__qis__isnan__body(double %d__4) - %7 = xor i1 %6, true - br i1 %7, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - ret i64 4 - -continue__4: ; preds = %continue__3 - %d__5 = call double @__quantum__qis__nan__body() - %d__6 = call double @__quantum__qis__sqrt__body(double %d__5) - %8 = call i1 @__quantum__qis__isnan__body(double %d__6) - %9 = xor i1 %8, true - br i1 %9, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - ret i64 5 - -continue__5: ; preds = %continue__4 - %d__7 = call double @__quantum__qis__infinity__body() - %d__8 = call double @__quantum__qis__sqrt__body(double %d__7) - %10 = call i1 @__quantum__qis__isinf__body(double %d__8) - %11 = xor i1 %10, true - br i1 %11, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - ret i64 6 - -continue__6: ; preds = %continue__5 - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() { -entry: - %input = call double @Microsoft__Quantum__Math__E__body() - 
%0 = call double @__quantum__qis__log__body(double %input) - %1 = fcmp one double 1.000000e+00, %0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - ret i64 1 - -continue__1: ; preds = %entry - %2 = call double @Microsoft__Quantum__Math__E__body() - %3 = call double @Microsoft__Quantum__Math__E__body() - %input__1 = fmul double %2, %3 - %4 = call double @__quantum__qis__log__body(double %input__1) - %5 = fcmp one double 2.000000e+00, %4 - br i1 %5, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - ret i64 2 - -continue__2: ; preds = %continue__1 - %d = call double @__quantum__qis__log__body(double 0.000000e+00) - %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) - %7 = xor i1 %6, true - br i1 %7, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - ret i64 3 - -continue__3: ; preds = %continue__2 - %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00) - %8 = call i1 @__quantum__qis__isnan__body(double %d__1) - %9 = xor i1 %8, true - br i1 %9, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - ret i64 4 - -continue__4: ; preds = %continue__3 - %input__4 = call double @__quantum__qis__nan__body() - %d__2 = call double @__quantum__qis__log__body(double %input__4) - %10 = call i1 @__quantum__qis__isnan__body(double %d__2) - %11 = xor i1 %10, true - br i1 %11, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - ret i64 5 - -continue__5: ; preds = %continue__4 - %input__5 = call double @__quantum__qis__infinity__body() - %d__3 = call double @__quantum__qis__log__body(double %input__5) - %12 = call i1 @__quantum__qis__isinf__body(double %d__3) - %13 = xor i1 %12, true - br i1 %13, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - ret i64 6 - -continue__6: ; preds = %continue__5 - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() { -entry: - %0 = call double 
@__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00) - %1 = fcmp one double 0.000000e+00, %0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - ret i64 1 - -continue__1: ; preds = %entry - %2 = call double @Microsoft__Quantum__Math__PI__body() - %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00) - %4 = fcmp one double %2, %3 - br i1 %4, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - ret i64 2 - -continue__2: ; preds = %continue__1 - %5 = call double @Microsoft__Quantum__Math__PI__body() - %6 = fdiv double %5, 2.000000e+00 - %7 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00) - %8 = fcmp one double %6, %7 - br i1 %8, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - ret i64 3 - -continue__3: ; preds = %continue__2 - %9 = call double @Microsoft__Quantum__Math__PI__body() - %10 = fneg double %9 - %11 = fdiv double %10, 2.000000e+00 - %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00) - %13 = fcmp one double %11, %12 - br i1 %13, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - ret i64 4 - -continue__4: ; preds = %continue__3 - %14 = call double @Microsoft__Quantum__Math__PI__body() - %15 = fdiv double %14, 4.000000e+00 - %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00) - %17 = fcmp one double %15, %16 - br i1 %17, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - ret i64 5 - -continue__5: ; preds = %continue__4 - %18 = call double @Microsoft__Quantum__Math__PI__body() - %19 = fmul double %18, 3.000000e+00 - %20 = fdiv double %19, 4.000000e+00 - %21 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double -1.000000e+00) - %22 = fcmp one double %20, %21 - br i1 %22, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - ret i64 6 - 
-continue__6: ; preds = %continue__5 - %23 = call double @Microsoft__Quantum__Math__PI__body() - %24 = fneg double %23 - %25 = fmul double %24, 3.000000e+00 - %26 = fdiv double %25, 4.000000e+00 - %27 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double -1.000000e+00) - %28 = fcmp one double %26, %27 - br i1 %28, label %then0__7, label %continue__7 - -then0__7: ; preds = %continue__6 - ret i64 7 - -continue__7: ; preds = %continue__6 - %29 = call double @Microsoft__Quantum__Math__PI__body() - %30 = fneg double %29 - %31 = fdiv double %30, 4.000000e+00 - %32 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 1.000000e+00) - %33 = fcmp one double %31, %32 - br i1 %33, label %then0__8, label %continue__8 - -then0__8: ; preds = %continue__7 - ret i64 8 - -continue__8: ; preds = %continue__7 - %34 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 0.000000e+00) - %35 = fcmp one double 0.000000e+00, %34 - br i1 %35, label %then0__9, label %continue__9 - -then0__9: ; preds = %continue__8 - ret i64 9 - -continue__9: ; preds = %continue__8 - %y__9 = call double @__quantum__qis__nan__body() - %d = call double @__quantum__qis__arctan2__body(double %y__9, double 0.000000e+00) - %36 = call i1 @__quantum__qis__isnan__body(double %d) - %37 = xor i1 %36, true - br i1 %37, label %then0__10, label %continue__10 - -then0__10: ; preds = %continue__9 - ret i64 11 - -continue__10: ; preds = %continue__9 - %x__10 = call double @__quantum__qis__nan__body() - %d__1 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double %x__10) - %38 = call i1 @__quantum__qis__isnan__body(double %d__1) - %39 = xor i1 %38, true - br i1 %39, label %then0__11, label %continue__11 - -then0__11: ; preds = %continue__10 - ret i64 12 - -continue__11: ; preds = %continue__10 - %y__11 = call double @__quantum__qis__nan__body() - %x__11 = call double @__quantum__qis__nan__body() - %d__2 = call double 
@__quantum__qis__arctan2__body(double %y__11, double %x__11) - %40 = call i1 @__quantum__qis__isnan__body(double %d__2) - %41 = xor i1 %40, true - br i1 %41, label %then0__12, label %continue__12 - -then0__12: ; preds = %continue__11 - ret i64 13 - -continue__12: ; preds = %continue__11 - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() { -entry: - %0 = call %String* @__quantum__rt__string_create(i32 19, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @8, i32 0, i32 0)) - %1 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @9, i32 0, i32 0)) - %2 = load i2, i2* @PauliI - %3 = call %String* @__quantum__rt__pauli_to_string(i2 %2) - %4 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %3) - call void @__quantum__rt__string_update_reference_count(%String* %1, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %3, i64 -1) - %5 = call i1 @__quantum__rt__string_equal(%String* %0, %String* %4) - %6 = xor i1 %5, true - br i1 %6, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - ret i64 1 - -continue__1: ; preds = %entry - %7 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @10, i32 0, i32 0)) - %8 = load i2, i2* @PauliX - %9 = call %String* @__quantum__rt__pauli_to_string(i2 %8) - %10 = call i1 @__quantum__rt__string_equal(%String* %7, %String* %9) - %11 = xor i1 %10, true - br i1 %11, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void 
@__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - ret i64 2 - -continue__2: ; preds = %continue__1 - %12 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) - %13 = load i2, i2* @PauliY - %14 = call %String* @__quantum__rt__pauli_to_string(i2 %13) - %15 = call i1 @__quantum__rt__string_equal(%String* %12, %String* %14) - %16 = xor i1 %15, true - br i1 %16, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) - ret i64 3 - -continue__3: ; preds = %continue__2 - %17 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @12, i32 0, i32 0)) - %18 = load i2, i2* @PauliZ - %19 = call %String* @__quantum__rt__pauli_to_string(i2 %18) - %20 = call i1 @__quantum__rt__string_equal(%String* %17, %String* %19) - %21 = xor i1 %20, true - br i1 %21, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %17, i64 
-1) - call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) - ret i64 4 - -continue__4: ; preds = %continue__3 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) - ret i64 0 -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { -entry: - %0 = srem i64 %n, 2 - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__k__body(%Qubit* %q) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - ret void -} - -declare void @__quantum__qis__k__body(%Qubit*) - -define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { -entry: - call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %n = load i64, i64* %2 - %3 = srem i64 %n, 2 - %4 = icmp eq i64 %3, 1 - br i1 %4, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - call void 
@__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) - ret void -} - -declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %n = load i64, i64* %2 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { %Qubit*, i64 }* - %5 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 1 - store %Qubit* %q, %Qubit** %5 - store i64 %n, i64* %6 - call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %__controlQubits__, { %Qubit*, i64 }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* - %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 - %3 = load i64, i64* %1 - %4 = load i64, i64* %2 - %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) - %6 = bitcast %Tuple* 
%result-tuple to { i64 }* - %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 - store i64 %5, i64* %7 - ret void -} - -define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 - %2 = load i64, i64* %1 - %3 = bitcast %Tuple* %arg-tuple to { i64 }* - %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 - %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) - %7 = bitcast %Tuple* %6 to { i64, i64 }* - %8 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 1 - store i64 %2, i64* %8 - store i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %11 = load %Callable*, %Callable** %10 - call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) - ret void -} - -define void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -define void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) 
- call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 -} - -declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) - -define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isinf__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isinf__body(double) - -define double @Microsoft__Quantum__Intrinsic__NAN__body() { -entry: - %0 = call double @__quantum__qis__nan__body() - ret double %0 -} - -declare double @__quantum__qis__nan__body() - -define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isnan__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isnan__body(double) - -define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { -entry: - %0 = call double @__quantum__qis__infinity__body() - ret double %0 -} - -declare double @__quantum__qis__infinity__body() - -define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isnegativeinfinity__body(double) - -define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { -entry: - call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare double @__quantum__qis__arctan2__body(double, double) - -define double @Microsoft__Quantum__Math__PI__body() { -entry: - ret double 0x400921FB54442D18 -} - -declare double @__quantum__qis__sqrt__body(double) - -define double @Microsoft__Quantum__Math__E__body() { -entry: - ret double 0x4005BF0A8B145769 -} - -declare double @__quantum__qis__log__body(double) - -declare %String* @__quantum__rt__pauli_to_string(i2) - -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) - -declare void @__quantum__rt__string_update_reference_count(%String*, i64) - -declare i1 @__quantum__rt__string_equal(%String*, %String*) - -define double @Microsoft__Quantum__Math__Log__body(double %input) { -entry: - %0 = call double @__quantum__qis__log__body(double %input) - ret double %0 -} - -define double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { -entry: - %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) - ret double %0 -} - -define double @Microsoft__Quantum__Math__Sqrt__body(double %d) { -entry: - %0 = call double @__quantum__qis__sqrt__body(double %d) - ret double %0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays(i64 %array__count, i64* %array, i64 %index, i64 %val, i1 %compilerDecoy) #0 { -entry: - %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %array__count) - %1 = icmp sgt i64 %array__count, 0 - br i1 %1, label %copy, label %next - -copy: ; preds = %entry - %2 = ptrtoint i64* %array to i64 - %3 = sub i64 %array__count, 1 - br label %header__1 - -next: ; preds = %exit__1, %entry - %4 = call i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %0, i64 %index, i64 %val, i1 %compilerDecoy) - call void 
@__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - ret i64 %4 - -header__1: ; preds = %exiting__1, %copy - %5 = phi i64 [ 0, %copy ], [ %13, %exiting__1 ] - %6 = icmp sle i64 %5, %3 - br i1 %6, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %7 = mul i64 %5, 8 - %8 = add i64 %2, %7 - %9 = inttoptr i64 %8 to i64* - %10 = load i64, i64* %9 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) - %12 = bitcast i8* %11 to i64* - store i64 %10, i64* %12 - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %13 = add i64 %5, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - br label %next -} - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i64) - -attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj index 8f4f360a7bb..17f43b9b0c8 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj @@ -1,4 +1,4 @@ - + Exe From ec763fbeb513b85beebc9985ae8e0d40162f573c Mon Sep 17 00:00:00 2001 From: Robin Kuzmin Date: Wed, 17 Feb 2021 23:01:58 -0800 Subject: [PATCH 08/30] Added DrawRandomInt() and Message() (#511) * Added __quantum__qis__message__body ( function Message (msg : String) : Unit ) * Added DrawRadomInt() --- src/QirRuntime/lib/QIR/CMakeLists.txt | 1 + src/QirRuntime/lib/QIR/allocationsTracker.cpp | 2 +- src/QirRuntime/lib/QIR/allocationsTracker.hpp | 2 +- src/QirRuntime/lib/QIR/arrays.cpp | 2 +- src/QirRuntime/lib/QIR/bridge-qis.ll | 21 +- src/QirRuntime/lib/QIR/bridge-rt.ll | 2 +- src/QirRuntime/lib/QIR/callables.cpp | 2 +- src/QirRuntime/lib/QIR/context.cpp | 2 +- src/QirRuntime/lib/QIR/context.hpp | 2 +- src/QirRuntime/lib/QIR/delegated.cpp | 2 +- src/QirRuntime/lib/QIR/intrinsics.cpp | 2 +- src/QirRuntime/lib/QIR/intrinsicsMath.cpp | 59 +- src/QirRuntime/lib/QIR/intrinsicsOut.cpp | 47 + 
src/QirRuntime/lib/QIR/qirTypes.hpp | 2 +- src/QirRuntime/lib/QIR/quantum__qis.hpp | 6 +- .../lib/QIR/quantum__qis_internal.hpp | 29 + src/QirRuntime/lib/QIR/quantum__rt.hpp | 2 +- src/QirRuntime/lib/QIR/strings.cpp | 2 +- src/QirRuntime/lib/QIR/utils.cpp | 2 +- src/QirRuntime/test/QIR-static/CMakeLists.txt | 1 + src/QirRuntime/test/QIR-static/qir-driver.cpp | 2 +- src/QirRuntime/test/QIR-static/qir-gen.ll | 1690 +++++++++++++++++ .../test/QIR-static/qir-test-math.cpp | 124 +- .../test/QIR-static/qir-test-ouput.cpp | 52 + src/QirRuntime/test/QIR-static/qsharp/Math.qs | 2 +- .../test/QIR-static/qsharp/qir-test-arrays.qs | 5 +- .../test/QIR-static/qsharp/qir-test-math.qs | 5 + .../test/QIR-static/qsharp/qir-test-output.qs | 13 + .../QIR-static/qsharp/qir-test-partials.qs | 2 +- 29 files changed, 2062 insertions(+), 23 deletions(-) create mode 100644 src/QirRuntime/lib/QIR/intrinsicsOut.cpp create mode 100644 src/QirRuntime/lib/QIR/quantum__qis_internal.hpp create mode 100644 src/QirRuntime/test/QIR-static/qir-gen.ll create mode 100644 src/QirRuntime/test/QIR-static/qir-test-ouput.cpp create mode 100644 src/QirRuntime/test/QIR-static/qsharp/qir-test-output.qs diff --git a/src/QirRuntime/lib/QIR/CMakeLists.txt b/src/QirRuntime/lib/QIR/CMakeLists.txt index a90b95c7e71..48d8e755119 100644 --- a/src/QirRuntime/lib/QIR/CMakeLists.txt +++ b/src/QirRuntime/lib/QIR/CMakeLists.txt @@ -51,6 +51,7 @@ compile_from_qir(bridge-qis ${bridge_qis_target}) set(qis_sup_source_files "intrinsics.cpp" "intrinsicsMath.cpp" + intrinsicsOut.cpp ) add_library(qir-qis-support ${qis_sup_source_files}) diff --git a/src/QirRuntime/lib/QIR/allocationsTracker.cpp b/src/QirRuntime/lib/QIR/allocationsTracker.cpp index fe32e677538..2eaf5613b87 100644 --- a/src/QirRuntime/lib/QIR/allocationsTracker.cpp +++ b/src/QirRuntime/lib/QIR/allocationsTracker.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. 
// Licensed under the MIT License. #include "allocationsTracker.hpp" diff --git a/src/QirRuntime/lib/QIR/allocationsTracker.hpp b/src/QirRuntime/lib/QIR/allocationsTracker.hpp index 020d4e77834..b5621ed2850 100644 --- a/src/QirRuntime/lib/QIR/allocationsTracker.hpp +++ b/src/QirRuntime/lib/QIR/allocationsTracker.hpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #pragma once diff --git a/src/QirRuntime/lib/QIR/arrays.cpp b/src/QirRuntime/lib/QIR/arrays.cpp index 1e65b6831d4..b7cd195a8ed 100644 --- a/src/QirRuntime/lib/QIR/arrays.cpp +++ b/src/QirRuntime/lib/QIR/arrays.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include diff --git a/src/QirRuntime/lib/QIR/bridge-qis.ll b/src/QirRuntime/lib/QIR/bridge-qis.ll index 1c93d58f0fe..729b8c77e2e 100644 --- a/src/QirRuntime/lib/QIR/bridge-qis.ll +++ b/src/QirRuntime/lib/QIR/bridge-qis.ll @@ -1,4 +1,4 @@ -; Copyright (c) Microsoft Corporation. All rights reserved. +; Copyright (c) Microsoft Corporation. ; Licensed under the MIT License. ; The __quantum__qis__* definitions should be automatically generated by QIR, depending on the specific target. 
@@ -60,6 +60,8 @@ declare void @quantum__qis__y__ctl(%struct.QirArray*, %class.QUBIT*) declare void @quantum__qis__z__body(%class.QUBIT*) declare void @quantum__qis__z__ctl(%struct.QirArray*, %class.QUBIT*) +declare void @quantum__qis__message__body(%"struct.QirString"* %str) + ;=============================================================================== ; quantum.qis namespace implementations ; @@ -279,6 +281,14 @@ define void @__quantum__qis__z__ctl(%Array* %.ctls, %Qubit* %.q) { } +;=============================================================================== +; +define void @__quantum__qis__message__body(%String* %.str) { + %str = bitcast %String* %.str to %"struct.QirString"* + call void @quantum__qis__message__body(%"struct.QirString"* %str) + ret void +} + ;=============================================================================== ; quantum.qis math functions ; @@ -292,6 +302,7 @@ declare i1 @quantum__qis__isnan__body(double %d) declare double @quantum__qis__infinity__body() declare i1 @quantum__qis__isinf__body(double %d) declare double @quantum__qis__arctan2__body(double %y, double %x) +declare i64 @quantum__qis__drawrandomint__body(i64 %min, i64 %max) ; API for the user code: define double @__quantum__qis__nan__body() { ; Q#: function NAN() : Double http://www.cplusplus.com/reference/cmath/nan-function/ @@ -336,3 +347,11 @@ define double @__quantum__qis__arctan2__body(double %y, double %x) { ; Q#: func %result = call double @quantum__qis__arctan2__body(double %y, double %x) ret double %result } + + +; operation DrawRandomInt (min : Int, max : Int) : Int +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.random.drawrandomint +define i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) { + %result = call i64 @quantum__qis__drawrandomint__body(i64 %min, i64 %max) + ret i64 %result +} diff --git a/src/QirRuntime/lib/QIR/bridge-rt.ll b/src/QirRuntime/lib/QIR/bridge-rt.ll index dba25ad0067..d7712b0d8d8 100644 --- 
a/src/QirRuntime/lib/QIR/bridge-rt.ll +++ b/src/QirRuntime/lib/QIR/bridge-rt.ll @@ -1,4 +1,4 @@ -; Copyright (c) Microsoft Corporation. All rights reserved. +; Copyright (c) Microsoft Corporation. ; Licensed under the MIT License. ;======================================================================================================================= diff --git a/src/QirRuntime/lib/QIR/callables.cpp b/src/QirRuntime/lib/QIR/callables.cpp index 0ec5ddc54ba..752f109e810 100644 --- a/src/QirRuntime/lib/QIR/callables.cpp +++ b/src/QirRuntime/lib/QIR/callables.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include diff --git a/src/QirRuntime/lib/QIR/context.cpp b/src/QirRuntime/lib/QIR/context.cpp index 81dbc1a9ac0..531c9157ecf 100644 --- a/src/QirRuntime/lib/QIR/context.cpp +++ b/src/QirRuntime/lib/QIR/context.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include diff --git a/src/QirRuntime/lib/QIR/context.hpp b/src/QirRuntime/lib/QIR/context.hpp index e3719cd32f1..672eef42660 100644 --- a/src/QirRuntime/lib/QIR/context.hpp +++ b/src/QirRuntime/lib/QIR/context.hpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #pragma once diff --git a/src/QirRuntime/lib/QIR/delegated.cpp b/src/QirRuntime/lib/QIR/delegated.cpp index 1eadee7f826..f3d82253f3c 100644 --- a/src/QirRuntime/lib/QIR/delegated.cpp +++ b/src/QirRuntime/lib/QIR/delegated.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
/*============================================================================= diff --git a/src/QirRuntime/lib/QIR/intrinsics.cpp b/src/QirRuntime/lib/QIR/intrinsics.cpp index 301047a345b..d2ebb99eb2c 100644 --- a/src/QirRuntime/lib/QIR/intrinsics.cpp +++ b/src/QirRuntime/lib/QIR/intrinsics.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. /*============================================================================= diff --git a/src/QirRuntime/lib/QIR/intrinsicsMath.cpp b/src/QirRuntime/lib/QIR/intrinsicsMath.cpp index a3b97e420e9..fedfbd6ba7b 100644 --- a/src/QirRuntime/lib/QIR/intrinsicsMath.cpp +++ b/src/QirRuntime/lib/QIR/intrinsicsMath.cpp @@ -2,8 +2,20 @@ // Licensed under the MIT License. #include +#include +#include #include "quantum__qis.hpp" +#include "quantum__qis_internal.hpp" +#include "quantum__rt.hpp" +// Forward declarations: +namespace // Visible in this translation unit only. +{ +extern thread_local bool randomizeSeed; +extern int64_t lastGeneratedRndNum; +} + +// Implementation: extern "C" { @@ -28,4 +40,49 @@ double quantum__qis__arctan2__body(double y, double x) return std::atan2(y, x); // https://en.cppreference.com/w/cpp/numeric/math/atan2 } -} // extern "C" +int64_t quantum__qis__drawrandomint__body(int64_t minimum, int64_t maximum) +{ + if(minimum > maximum) + { + quantum__rt__fail(quantum__rt__string_create(Quantum::Qis::Internal::excStrDrawRandomInt)); + } + + // https://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution + // https://en.cppreference.com/w/cpp/numeric/random + thread_local static std::mt19937_64 gen(randomizeSeed + ? std::random_device()() : // Default + 0); // For test purposes only. + + lastGeneratedRndNum = std::uniform_int_distribution(minimum, maximum)(gen); + return lastGeneratedRndNum; +} + +} // extern "C" + +namespace // Visible in this translation unit only. 
+{ +thread_local bool randomizeSeed = true; +int64_t lastGeneratedRndNum = 0; +} + +// For test purposes only: +namespace Quantum +{ +namespace Qis +{ + namespace Internal + { + char const excStrDrawRandomInt[] = "Invalid Argument: minimum > maximum for DrawRandomInt()"; + + void RandomizeSeed(bool randomize) + { + randomizeSeed = randomize; + } + + int64_t GetLastGeneratedRandomNumber() + { + return lastGeneratedRndNum; + } + } // namespace Internal +} // namespace Qis +} // namespace Quantum diff --git a/src/QirRuntime/lib/QIR/intrinsicsOut.cpp b/src/QirRuntime/lib/QIR/intrinsicsOut.cpp new file mode 100644 index 00000000000..6b46289be10 --- /dev/null +++ b/src/QirRuntime/lib/QIR/intrinsicsOut.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include + +#include "qirTypes.hpp" +#include "quantum__qis.hpp" + +// Forward declarations: +static std::ostream& GetOutputStream(); + +// Public API: +extern "C" +{ + void quantum__qis__message__body(QirString* qstr) // NOLINT + { + GetOutputStream() << qstr->str << std::endl; + } +} // extern "C" + + +// Internal API: +static std::ostream* currentOutputStream = &std::cout; // Log to std::cout by default. + +static std::ostream& GetOutputStream() +{ + return *currentOutputStream; +} + + +// For test purposes only: +namespace Quantum // Replace with `namespace Quantum::Qis::Internal` after migration to C++17. +{ +namespace Qis +{ + namespace Internal + { + std::ostream& SetOutputStream(std::ostream & newOStream) + { + std::ostream& oldOStream = *currentOutputStream; + currentOutputStream = &newOStream; + return oldOStream; + } + } // namespace Internal +} // namespace Qis +} // namespace Quantum + diff --git a/src/QirRuntime/lib/QIR/qirTypes.hpp b/src/QirRuntime/lib/QIR/qirTypes.hpp index e8db8ccf4e7..a9ca03fa72f 100644 --- a/src/QirRuntime/lib/QIR/qirTypes.hpp +++ b/src/QirRuntime/lib/QIR/qirTypes.hpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #pragma once diff --git a/src/QirRuntime/lib/QIR/quantum__qis.hpp b/src/QirRuntime/lib/QIR/quantum__qis.hpp index 50e8c249016..8e80cd80404 100644 --- a/src/QirRuntime/lib/QIR/quantum__qis.hpp +++ b/src/QirRuntime/lib/QIR/quantum__qis.hpp @@ -4,7 +4,7 @@ #include "CoreTypes.hpp" -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #ifdef _WIN32 @@ -60,9 +60,13 @@ extern "C" QIR_SHARED_API void quantum__qis__z__body(QUBIT*); // NOLINT QIR_SHARED_API void quantum__qis__z__ctl(QirArray*, QUBIT*); // NOLINT + QIR_SHARED_API void quantum__qis__message__body(QirString* qstr); // NOLINT + + // Q# Math: QIR_SHARED_API bool quantum__qis__isnan__body(double d); // NOLINT QIR_SHARED_API double quantum__qis__infinity__body(); // NOLINT QIR_SHARED_API bool quantum__qis__isinf__body(double d); // NOLINT QIR_SHARED_API double quantum__qis__arctan2__body(double y, double x); // NOLINT + QIR_SHARED_API int64_t quantum__qis__drawrandomint__body(int64_t minimum, int64_t maximum); // NOLINT } \ No newline at end of file diff --git a/src/QirRuntime/lib/QIR/quantum__qis_internal.hpp b/src/QirRuntime/lib/QIR/quantum__qis_internal.hpp new file mode 100644 index 00000000000..52335f32027 --- /dev/null +++ b/src/QirRuntime/lib/QIR/quantum__qis_internal.hpp @@ -0,0 +1,29 @@ +#pragma once + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// To be included by the QIS implementation and QIS tests only. +// Not to be included by parties outside QIS. + +#ifdef _WIN32 +#define QIR_SHARED_API __declspec(dllexport) +#else +#define QIR_SHARED_API +#endif + +// For test purposes only: +namespace Quantum // Replace with `namespace Quantum::Qis::Internal` after migration to C++17. 
+{ +namespace Qis +{ + namespace Internal + { + extern char const excStrDrawRandomInt[]; + + extern std::ostream& SetOutputStream(std::ostream& newOStream); + void RandomizeSeed(bool randomize); + int64_t GetLastGeneratedRandomNumber(); + } // namespace Internal +} // namespace Qis +} // namespace Quantum diff --git a/src/QirRuntime/lib/QIR/quantum__rt.hpp b/src/QirRuntime/lib/QIR/quantum__rt.hpp index 40679a99895..a3e26a05c8f 100644 --- a/src/QirRuntime/lib/QIR/quantum__rt.hpp +++ b/src/QirRuntime/lib/QIR/quantum__rt.hpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #pragma once diff --git a/src/QirRuntime/lib/QIR/strings.cpp b/src/QirRuntime/lib/QIR/strings.cpp index 0435329a1ef..90679a5dc1f 100644 --- a/src/QirRuntime/lib/QIR/strings.cpp +++ b/src/QirRuntime/lib/QIR/strings.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include diff --git a/src/QirRuntime/lib/QIR/utils.cpp b/src/QirRuntime/lib/QIR/utils.cpp index 8fb77d8ff07..8ae3e7b98df 100644 --- a/src/QirRuntime/lib/QIR/utils.cpp +++ b/src/QirRuntime/lib/QIR/utils.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
#include diff --git a/src/QirRuntime/test/QIR-static/CMakeLists.txt b/src/QirRuntime/test/QIR-static/CMakeLists.txt index 838e1f36f47..a9754805783 100644 --- a/src/QirRuntime/test/QIR-static/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-static/CMakeLists.txt @@ -19,6 +19,7 @@ add_executable(qir-static-tests qir-driver.cpp qir-test-math.cpp qir-test-strings.cpp + qir-test-ouput.cpp ) target_link_libraries(qir-static-tests PUBLIC diff --git a/src/QirRuntime/test/QIR-static/qir-driver.cpp b/src/QirRuntime/test/QIR-static/qir-driver.cpp index 79f9cfdcca4..1c143af5d0a 100644 --- a/src/QirRuntime/test/QIR-static/qir-driver.cpp +++ b/src/QirRuntime/test/QIR-static/qir-driver.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include diff --git a/src/QirRuntime/test/QIR-static/qir-gen.ll b/src/QirRuntime/test/QIR-static/qir-gen.ll new file mode 100644 index 00000000000..f4edf3a2b5a --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-gen.ll @@ -0,0 +1,1690 @@ +%Result = type opaque +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Callable = type opaque +%Qubit = type opaque +%String = type opaque +%Array = type opaque + +@ResultZero = external global %Result* +@ResultOne = external global %Result* +@PauliI = constant i2 0 +@PauliX = constant i2 1 +@PauliY = constant i2 -1 +@PauliZ = constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } +@Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] +@PartialApplication__1 = constant [4 x void (%Tuple*, 
%Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] +@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] +@0 = internal constant [14 x i8] c"error code: 1\00" +@1 = internal constant [14 x i8] c"error code: 2\00" +@2 = internal constant [14 x i8] c"error code: 3\00" +@3 = internal constant [14 x i8] c"error code: 2\00" +@4 = internal constant [14 x i8] c"error code: 5\00" +@5 = internal constant [14 x i8] c"error code: 6\00" +@6 = internal constant [14 x i8] c"error code: 7\00" +@7 = internal constant [5 x i8] c"Test\00" +@8 = internal constant [30 x i8] c"Unexpected measurement result\00" +@Microsoft__Quantum__Testing__QIR__Subtract = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] +@9 = internal constant [20 x i8] c"Pauli value: PauliI\00" +@10 = internal constant [14 x i8] c"Pauli value: \00" +@11 = internal constant [7 x i8] c"PauliX\00" +@12 = internal constant [7 x i8] c"PauliY\00" +@13 = internal constant [7 
x i8] c"PauliZ\00" + +define void @Microsoft__Quantum__Testing__QIR__TestControlled__body() { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* + %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Qop, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + store %Callable* %4, %Callable** %2 + store i64 1, i64* %3 + %qop = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %0) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 1) + %adj_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_qop) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 1) + %ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_qop) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 1) + %adj_ctl_qop = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %adj_ctl_qop) + call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_ctl_qop) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 1) + %ctl_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_ctl_qop) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 1) + %q1 = call %Qubit* @__quantum__rt__qubit_allocate() + %q2 = call %Qubit* @__quantum__rt__qubit_allocate() + %q3 = call %Qubit* @__quantum__rt__qubit_allocate() + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %q1, %Qubit** %7 + call void @__quantum__rt__callable_invoke(%Callable* %qop, %Tuple* %5, %Tuple* null) + %8 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1) + %9 = load %Result*, %Result** @ResultOne + %10 = call i1 @__quantum__rt__result_equal(%Result* %8, %Result* %9) + %11 = xor i1 %10, true + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void 
@__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + 
call void @__quantum__rt__fail(%String* %12) + unreachable + +continue__1: ; preds = %entry + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Qubit* }* + %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 + store %Qubit* %q2, %Qubit** %15 + call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %13, %Tuple* null) + %16 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) + %17 = load %Result*, %Result** @ResultOne + %18 = call i1 @__quantum__rt__result_equal(%Result* %16, %Result* %17) + %19 = xor i1 %18, true + br i1 %19, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %20 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__2: ; preds = %continue__1 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %22 = bitcast %Tuple* %21 to { %Array*, %Qubit* }* + %23 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 1 + %25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to %Qubit** + store %Qubit* %q1, %Qubit** %27 + store %Array* %25, %Array** %23 + store %Qubit* %q3, %Qubit** %24 + call void 
@__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %21, %Tuple* null) + %28 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %29 = load %Result*, %Result** @ResultOne + %30 = call i1 @__quantum__rt__result_equal(%Result* %28, %Result* %29) + %31 = xor i1 %30, true + br i1 %31, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + %32 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %32) + unreachable + +continue__3: ; preds = %continue__2 + %33 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %34 = bitcast %Tuple* %33 to { %Array*, %Qubit* }* + %35 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 0 + %36 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 1 + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to %Qubit** + store %Qubit* %q2, %Qubit** %39 + store %Array* %37, %Array** %35 + store %Qubit* %q3, %Qubit** %36 + call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, %Tuple* %33, %Tuple* null) + %40 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %41 = load 
%Result*, %Result** @ResultZero + %42 = call i1 @__quantum__rt__result_equal(%Result* %40, %Result* %41) + %43 = xor i1 %42, true + br i1 %43, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + %44 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @3, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %44) + unreachable + +continue__4: ; preds = %continue__3 + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %46 = bitcast %Tuple* %45 to { %Array*, { %Array*, %Qubit* }* }* + %47 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 1 + %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) + %51 = bitcast i8* %50 to %Qubit** + store %Qubit* %q1, %Qubit** %51 + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** 
null, i32 1) to i64), i64 2)) + %53 = bitcast %Tuple* %52 to { %Array*, %Qubit* }* + %54 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to %Qubit** + store %Qubit* %q2, %Qubit** %58 + store %Array* %56, %Array** %54 + store %Qubit* %q3, %Qubit** %55 + store %Array* %49, %Array** %47 + store { %Array*, %Qubit* }* %53, { %Array*, %Qubit* }** %48 + call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %45, %Tuple* null) + %59 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %60 = load %Result*, %Result** @ResultOne + %61 = call i1 @__quantum__rt__result_equal(%Result* %59, %Result* %60) + %62 = xor i1 %61, true + br i1 %62, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + %63 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %63) + unreachable + +continue__5: ; preds = %continue__4 + %64 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %64) + %65 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %66 = bitcast %Tuple* %65 to { %Array*, %Qubit* }* + %67 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 0 + %68 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to %Qubit** + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %q1, %Qubit** %71 + store %Qubit* %q2, %Qubit** %73 + store %Array* %69, %Array** %67 + store %Qubit* %q3, %Qubit** %68 + call void @__quantum__rt__callable_invoke(%Callable* %64, %Tuple* %65, %Tuple* null) + %74 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %75 = load %Result*, %Result** @ResultZero + %76 = call i1 @__quantum__rt__result_equal(%Result* %74, %Result* %75) + %77 = xor i1 %76, true + br i1 %77, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + %78 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 
x i8]* @5, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %78) + unreachable + +continue__6: ; preds = %continue__5 + %q4 = call %Qubit* @__quantum__rt__qubit_allocate() + %79 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* 
%79) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { %Qubit* }* + %82 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %81, i32 0, i32 0 + store %Qubit* %q3, %Qubit** %82 + call void @__quantum__rt__callable_invoke(%Callable* %79, %Tuple* %80, %Tuple* null) + %83 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %83) + call void @__quantum__rt__callable_make_adjoint(%Callable* %83) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %85 = bitcast %Tuple* %84 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* + %86 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 1 + %88 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) + %90 = bitcast i8* %89 to %Qubit** + store %Qubit* %q1, %Qubit** %90 + %91 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %92 = bitcast %Tuple* %91 to { %Array*, { %Array*, %Qubit* }* }* + %93 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 0 + %94 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 1 + %95 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0) + %97 = 
bitcast i8* %96 to %Qubit** + store %Qubit* %q2, %Qubit** %97 + %98 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %99 = bitcast %Tuple* %98 to { %Array*, %Qubit* }* + %100 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 0 + %101 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 1 + %102 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 0) + %104 = bitcast i8* %103 to %Qubit** + store %Qubit* %q3, %Qubit** %104 + store %Array* %102, %Array** %100 + store %Qubit* %q4, %Qubit** %101 + store %Array* %95, %Array** %93 + store { %Array*, %Qubit* }* %99, { %Array*, %Qubit* }** %94 + store %Array* %88, %Array** %86 + store { %Array*, { %Array*, %Qubit* }* }* %92, { %Array*, { %Array*, %Qubit* }* }** %87 + call void @__quantum__rt__callable_invoke(%Callable* %83, %Tuple* %84, %Tuple* null) + %105 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4) + %106 = load %Result*, %Result** @ResultOne + %107 = call i1 @__quantum__rt__result_equal(%Result* %105, %Result* %106) + %108 = xor i1 %107, true + br i1 %108, label %then0__7, label %continue__7 + +then0__7: ; preds = %continue__6 + %109 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @6, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q4) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + 
call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %109) + unreachable + +continue__7: ; preds = %continue__6 + call void @__quantum__rt__qubit_release(%Qubit* %q4) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, 
i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + ret void +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +define void @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1 + %4 = load i64, i64* %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %3, i64 %4) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1 + %4 = load i64, i64* %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %3, i64 %4) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper(%Tuple* %capture-tuple, 
%Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %3, { %Qubit*, i64 }* %4) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %3, { %Qubit*, i64 }* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i64)*]*, %Tuple*) + +define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 + %5 = load i64, i64* %4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* + %8 = getelementptr 
inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8 + store i64 %5, i64* %9 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) + ret void +} + +define void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 + %5 = load i64, i64* %4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* + %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8 + store i64 %5, i64* %9 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) + call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i64 -1) + ret void +} + +define void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 + %7 = load i64, i64* %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* + %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 + store %Qubit* %4, %Qubit** %10 + store i64 %7, i64* %11 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14 + store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 + %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) + ret void +} + +define void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 + %7 = load i64, i64* %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* + %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 + store %Qubit* %4, %Qubit** %10 + store i64 %7, i64* %11 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* 
}, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14 + store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 + %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) + ret void +} + +define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +declare void @__quantum__rt__callable_memory_management(i32, %Callable*, i64) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i64) + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + %2 = load i2, i2* @PauliZ + store i2 %2, i2* %1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %qubit, %Qubit** %4 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i64 -1) + ret %Result* %5 +} + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare %String* @__quantum__rt__string_create(i32, i8*) + +declare void 
@__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) + +declare void @__quantum__rt__fail(%String*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + +define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %compilerDecoy) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) + %local = alloca %Array* + store %Array* %array, %Array** %local + call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) + call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) + %0 = call %Array* @__quantum__rt__array_copy(%Array* %array, i1 false) + %1 = icmp ne %Array* %array, %0 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %index) + %3 = bitcast i8* %2 to i64* + store i64 %val, i64* %3 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 1) + store %Array* %0, %Array** %local + %n = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %4 = sub i64 %n, 1 + %5 = load %Range, %Range* @EmptyRange + %6 = insertvalue %Range %5, i64 %index, 0 + %7 = insertvalue %Range %6, i64 1, 1 + %8 = insertvalue %Range %7, i64 %4, 2 + %slice1 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %8, i1 false) + call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 1) + %9 = load %Range, %Range* @EmptyRange + %10 = insertvalue %Range %9, i64 %index, 0 + %11 = insertvalue %Range %10, i64 -2, 
1 + %12 = insertvalue %Range %11, i64 0, 2 + %slice2 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %12, i1 false) + call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 1) + %result = call %Array* @__quantum__rt__array_concatenate(%Array* %slice2, %Array* %slice1) + call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 1) + %sum = alloca i64 + store i64 0, i64* %sum + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %result) + %14 = sub i64 %13, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %21, %exiting__1 ] + %15 = icmp sle i64 %i, %14 + br i1 %15, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %16 = load i64, i64* %sum + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %result, i64 %i) + %18 = bitcast i8* %17 to i64* + %19 = load i64, i64* %18 + %20 = add i64 %16, %19 + store i64 %20, i64* %sum + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + br i1 %compilerDecoy, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + call void @Microsoft__Quantum__Testing__QIR__TestControlled__body() + %res2 = call i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 17, i64 42) + call void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() + %res4 = call i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() + %res5 = call i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() + %res6 = call i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() + %res7 = call i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() + %res8 = call i64 @Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(i64 0, i64 1) + %22 = call %String* @__quantum__rt__string_create(i32 4, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @7, i32 0, i32 0)) + call void 
@Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(%String* %22) + call void @__quantum__rt__string_update_reference_count(%String* %22, i64 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %exit__1 + %23 = load i64, i64* %sum + call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %slice1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %slice2, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %result, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) + ret i64 %23 +} + +declare void @__quantum__rt__array_update_alias_count(%Array*, i64) + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* + %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 + %4 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + store %Callable* %4, %Callable** %2 + store i64 %x, i64* %3 + %subtractor = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %0) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %y, i64* %7 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i64 }* + %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0 + %11 = load i64, i64* %10 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %subtractor, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + ret i64 %11 +} + +define void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__x__body(%Qubit* %qubit) + %q = call %Qubit* @__quantum__rt__qubit_allocate() + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3 + %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4) + %6 = load %Result*, %Result** @ResultOne + %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6) + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %q) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10) + %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q) + %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12) + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %14 = call %String* @__quantum__rt__string_create(i32 29, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @8, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable + +continue__2: ; preds = %continue__1 + call void @__quantum__rt__qubit_release(%Qubit* 
%q) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void +} + +define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() { +entry: + %0 = call double @__quantum__qis__sqrt__body(double 4.000000e+00) + %1 = fcmp one double 2.000000e+00, %0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + ret i64 1 + +continue__1: ; preds = %entry + %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00) + %3 = fcmp one double 3.000000e+00, %2 + br i1 %3, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + ret i64 2 + +continue__2: ; preds = %continue__1 + %4 = call double @__quantum__qis__sqrt__body(double 1.000000e+02) + %5 = fcmp one double 1.000000e+01, %4 + br i1 %5, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + ret i64 3 + +continue__3: ; preds = %continue__2 + %d__4 = call double @__quantum__qis__sqrt__body(double -5.000000e+00) + %6 = call i1 @__quantum__qis__isnan__body(double %d__4) + %7 = xor i1 %6, true + br i1 %7, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + ret i64 4 + +continue__4: ; preds = %continue__3 + %d__5 = call double @__quantum__qis__nan__body() + %d__6 = call double @__quantum__qis__sqrt__body(double %d__5) + %8 = call i1 @__quantum__qis__isnan__body(double %d__6) + %9 = xor i1 %8, true + br i1 %9, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + ret i64 5 + +continue__5: ; preds = %continue__4 + %d__7 = call double @__quantum__qis__infinity__body() + %d__8 = call double 
@__quantum__qis__sqrt__body(double %d__7) + %10 = call i1 @__quantum__qis__isinf__body(double %d__8) + %11 = xor i1 %10, true + br i1 %11, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + ret i64 6 + +continue__6: ; preds = %continue__5 + ret i64 0 +} + +define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() { +entry: + %input = call double @Microsoft__Quantum__Math__E__body() + %0 = call double @__quantum__qis__log__body(double %input) + %1 = fcmp one double 1.000000e+00, %0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + ret i64 1 + +continue__1: ; preds = %entry + %2 = call double @Microsoft__Quantum__Math__E__body() + %3 = call double @Microsoft__Quantum__Math__E__body() + %input__1 = fmul double %2, %3 + %4 = call double @__quantum__qis__log__body(double %input__1) + %5 = fcmp one double 2.000000e+00, %4 + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + ret i64 2 + +continue__2: ; preds = %continue__1 + %d = call double @__quantum__qis__log__body(double 0.000000e+00) + %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) + %7 = xor i1 %6, true + br i1 %7, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + ret i64 3 + +continue__3: ; preds = %continue__2 + %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00) + %8 = call i1 @__quantum__qis__isnan__body(double %d__1) + %9 = xor i1 %8, true + br i1 %9, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + ret i64 4 + +continue__4: ; preds = %continue__3 + %input__4 = call double @__quantum__qis__nan__body() + %d__2 = call double @__quantum__qis__log__body(double %input__4) + %10 = call i1 @__quantum__qis__isnan__body(double %d__2) + %11 = xor i1 %10, true + br i1 %11, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + ret i64 5 + +continue__5: ; preds = %continue__4 + %input__5 = call double 
@__quantum__qis__infinity__body() + %d__3 = call double @__quantum__qis__log__body(double %input__5) + %12 = call i1 @__quantum__qis__isinf__body(double %d__3) + %13 = xor i1 %12, true + br i1 %13, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + ret i64 6 + +continue__6: ; preds = %continue__5 + ret i64 0 +} + +define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() { +entry: + %0 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00) + %1 = fcmp one double 0.000000e+00, %0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + ret i64 1 + +continue__1: ; preds = %entry + %2 = call double @Microsoft__Quantum__Math__PI__body() + %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00) + %4 = fcmp one double %2, %3 + br i1 %4, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + ret i64 2 + +continue__2: ; preds = %continue__1 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fdiv double %5, 2.000000e+00 + %7 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00) + %8 = fcmp one double %6, %7 + br i1 %8, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + ret i64 3 + +continue__3: ; preds = %continue__2 + %9 = call double @Microsoft__Quantum__Math__PI__body() + %10 = fneg double %9 + %11 = fdiv double %10, 2.000000e+00 + %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00) + %13 = fcmp one double %11, %12 + br i1 %13, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + ret i64 4 + +continue__4: ; preds = %continue__3 + %14 = call double @Microsoft__Quantum__Math__PI__body() + %15 = fdiv double %14, 4.000000e+00 + %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00) + %17 = fcmp one double %15, %16 + br i1 %17, label %then0__5, label %continue__5 + 
+then0__5: ; preds = %continue__4 + ret i64 5 + +continue__5: ; preds = %continue__4 + %18 = call double @Microsoft__Quantum__Math__PI__body() + %19 = fmul double %18, 3.000000e+00 + %20 = fdiv double %19, 4.000000e+00 + %21 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double -1.000000e+00) + %22 = fcmp one double %20, %21 + br i1 %22, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + ret i64 6 + +continue__6: ; preds = %continue__5 + %23 = call double @Microsoft__Quantum__Math__PI__body() + %24 = fneg double %23 + %25 = fmul double %24, 3.000000e+00 + %26 = fdiv double %25, 4.000000e+00 + %27 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double -1.000000e+00) + %28 = fcmp one double %26, %27 + br i1 %28, label %then0__7, label %continue__7 + +then0__7: ; preds = %continue__6 + ret i64 7 + +continue__7: ; preds = %continue__6 + %29 = call double @Microsoft__Quantum__Math__PI__body() + %30 = fneg double %29 + %31 = fdiv double %30, 4.000000e+00 + %32 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 1.000000e+00) + %33 = fcmp one double %31, %32 + br i1 %33, label %then0__8, label %continue__8 + +then0__8: ; preds = %continue__7 + ret i64 8 + +continue__8: ; preds = %continue__7 + %34 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 0.000000e+00) + %35 = fcmp one double 0.000000e+00, %34 + br i1 %35, label %then0__9, label %continue__9 + +then0__9: ; preds = %continue__8 + ret i64 9 + +continue__9: ; preds = %continue__8 + %y__9 = call double @__quantum__qis__nan__body() + %d = call double @__quantum__qis__arctan2__body(double %y__9, double 0.000000e+00) + %36 = call i1 @__quantum__qis__isnan__body(double %d) + %37 = xor i1 %36, true + br i1 %37, label %then0__10, label %continue__10 + +then0__10: ; preds = %continue__9 + ret i64 11 + +continue__10: ; preds = %continue__9 + %x__10 = call double @__quantum__qis__nan__body() + %d__1 = call double 
@__quantum__qis__arctan2__body(double 0.000000e+00, double %x__10) + %38 = call i1 @__quantum__qis__isnan__body(double %d__1) + %39 = xor i1 %38, true + br i1 %39, label %then0__11, label %continue__11 + +then0__11: ; preds = %continue__10 + ret i64 12 + +continue__11: ; preds = %continue__10 + %y__11 = call double @__quantum__qis__nan__body() + %x__11 = call double @__quantum__qis__nan__body() + %d__2 = call double @__quantum__qis__arctan2__body(double %y__11, double %x__11) + %40 = call i1 @__quantum__qis__isnan__body(double %d__2) + %41 = xor i1 %40, true + br i1 %41, label %then0__12, label %continue__12 + +then0__12: ; preds = %continue__11 + ret i64 13 + +continue__12: ; preds = %continue__11 + ret i64 0 +} + +define i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() { +entry: + %0 = call %String* @__quantum__rt__string_create(i32 19, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @9, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @10, i32 0, i32 0)) + %2 = load i2, i2* @PauliI + %3 = call %String* @__quantum__rt__pauli_to_string(i2 %2) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %1, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i64 -1) + %5 = call i1 @__quantum__rt__string_equal(%String* %0, %String* %4) + %6 = xor i1 %5, true + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + ret i64 1 + +continue__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) + %8 = load i2, i2* @PauliX + %9 = call %String* @__quantum__rt__pauli_to_string(i2 %8) + %10 = call 
i1 @__quantum__rt__string_equal(%String* %7, %String* %9) + %11 = xor i1 %10, true + br i1 %11, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + ret i64 2 + +continue__2: ; preds = %continue__1 + %12 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @12, i32 0, i32 0)) + %13 = load i2, i2* @PauliY + %14 = call %String* @__quantum__rt__pauli_to_string(i2 %13) + %15 = call i1 @__quantum__rt__string_equal(%String* %12, %String* %14) + %16 = xor i1 %15, true + br i1 %16, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) + ret i64 3 + +continue__3: ; preds = %continue__2 + %17 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @13, i32 0, i32 0)) + %18 = load i2, i2* @PauliZ + %19 = call %String* @__quantum__rt__pauli_to_string(i2 %18) + %20 = call i1 @__quantum__rt__string_equal(%String* %17, %String* %19) + %21 = xor i1 %20, true + br i1 %21, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) + ret i64 4 + +continue__4: ; preds = %continue__3 + call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) + ret i64 0 +} + +define i64 @Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(i64 %min, i64 %max) { +entry: + %0 = call i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) + ret i64 %0 +} + +define void @Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(%String* %msg) { +entry: + call void @__quantum__qis__message__body(%String* %msg) + ret void +} + +declare void @__quantum__rt__string_update_reference_count(%String*, i64) + +define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { +entry: + %0 = srem i64 %n, 2 + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__k__body(%Qubit* %q) + br label %continue__1 + +continue__1: ; 
preds = %then0__1, %entry + ret void +} + +declare void @__quantum__qis__k__body(%Qubit*) + +define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { +entry: + call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %q = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %n = load i64, i64* %2 + %3 = srem i64 %n, 2 + %4 = icmp eq i64 %3, 1 + br i1 %4, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) + call void @__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) + ret void +} + +declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %q = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %n = load i64, i64* %2 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, i64 }* + %5 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, i64 
}, { %Qubit*, i64 }* %4, i32 0, i32 1 + store %Qubit* %q, %Qubit** %5 + store i64 %n, i64* %6 + call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %__controlQubits__, { %Qubit*, i64 }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) + ret void +} + +define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { +entry: + %0 = sub i64 %from, %what + ret i64 %0 +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* + %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 + %3 = load i64, i64* %1 + %4 = load i64, i64* %2 + %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %5, i64* %7 + ret void +} + +define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 + %2 = load i64, i64* %1 + %3 = bitcast %Tuple* %arg-tuple to { i64 }* + %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 + %5 = load i64, i64* %4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) + %7 = bitcast %Tuple* %6 to { i64, i64 }* + %8 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 0 + %9 = getelementptr 
inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 1 + store i64 %2, i64* %8 + store i64 %5, i64* %9 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) + ret void +} + +define void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +define void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Message__body(%String* %msg) { +entry: + call void @__quantum__qis__message__body(%String* %msg) + ret void +} + +declare void @__quantum__qis__message__body(%String*) + +define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { +entry: + call void @__quantum__qis__k__body(%Qubit* %q) + ret void +} + +define void 
@Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { +entry: + call void @__quantum__qis__k__body(%Qubit* %q) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isnegativeinfinity__body(double) + +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isnan__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isnan__body(double) + +define double @Microsoft__Quantum__Intrinsic__NAN__body() { +entry: + %0 = call double 
@__quantum__qis__nan__body() + ret double %0 +} + +declare double @__quantum__qis__nan__body() + +define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isinf__body(double %d) + ret i1 %0 +} + +declare i1 @__quantum__qis__isinf__body(double) + +define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { +entry: + %0 = call double @__quantum__qis__infinity__body() + ret double %0 +} + +declare double @__quantum__qis__infinity__body() + +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +declare %String* @__quantum__rt__pauli_to_string(i2) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare i1 @__quantum__rt__string_equal(%String*, %String*) + +declare i64 @__quantum__qis__drawrandomint__body(i64, i64) + +define double @Microsoft__Quantum__Math__E__body() { +entry: + ret double 0x4005BF0A8B145769 +} + +declare double 
@__quantum__qis__log__body(double) + +declare double @__quantum__qis__arctan2__body(double, double) + +define double @Microsoft__Quantum__Math__PI__body() { +entry: + ret double 0x400921FB54442D18 +} + +declare double @__quantum__qis__sqrt__body(double) + +define double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +define double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +define i64 @Microsoft__Quantum__Random__DrawRandomInt__body(i64 %min, i64 %max) { +entry: + %0 = call i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) + ret i64 %0 +} + +define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays(i64 %array__count, i64* %array, i64 %index, i64 %val, i1 %compilerDecoy) #0 { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %array__count) + %1 = icmp sgt i64 %array__count, 0 + br i1 %1, label %copy, label %next + +copy: ; preds = %entry + %2 = ptrtoint i64* %array to i64 + %3 = sub i64 %array__count, 1 + br label %header__1 + +next: ; preds = %exit__1, %entry + %4 = call i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %0, i64 %index, i64 %val, i1 %compilerDecoy) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) + ret i64 %4 + +header__1: ; preds = %exiting__1, %copy + %5 = phi i64 [ 0, %copy ], [ %13, %exiting__1 ] + %6 = icmp sle i64 %5, %3 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = mul i64 %5, 8 + %8 = add i64 %2, %7 + %9 = inttoptr i64 %8 to i64* + %10 = load i64, i64* %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %12 = bitcast i8* %11 to i64* + 
store i64 %10, i64* %12 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + br label %next +} + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i64) + +attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-static/qir-test-math.cpp b/src/QirRuntime/test/QIR-static/qir-test-math.cpp index 27ccea024f0..f1007acd73e 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-math.cpp +++ b/src/QirRuntime/test/QIR-static/qir-test-math.cpp @@ -1,12 +1,17 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include +#include +#include #include "catch.hpp" -extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body(); // NOLINT -extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__LogTest__body(); // NOLINT -extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body(); // NOLINT +#include "quantum__qis_internal.hpp" + +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__LogTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(int64_t min, int64_t max); // NOLINT TEST_CASE("QIR: Math.Sqrt", "[qir.math][qir.Math.Sqrt]") { @@ -23,3 +28,116 @@ TEST_CASE("QIR: Math.ArcTan2", "[qir.math][qir.Math.ArcTan2]") REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body()); } +TEST_CASE("QIR: Math.DrawRandomInt", "[qir.math][qir.Math.DrawRandomInt]") +{ + // Test that the Q# random number generator is a wrapper around the C++ generator: + size_t times = 1000; + while(--times) + { + const uint64_t qsRndNum = + Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(std::numeric_limits::min(), + std::numeric_limits::max()); + const uint64_t cppRndNum = 
Quantum::Qis::Internal::GetLastGeneratedRandomNumber(); // This call must be done + // _after_ the Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(). + REQUIRE(qsRndNum == cppRndNum); + } + + // Make sure the correct exception is thrown if min > max: + REQUIRE_THROWS_AS(Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(10, 5), std::runtime_error); + + // Check the exception string: + try + { + (void)Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(10, 5); + } + catch (std::runtime_error const& exc) + { + REQUIRE(0 == strcmp(exc.what(), Quantum::Qis::Internal::excStrDrawRandomInt)); + } + + // There is a strong difference in the opinions about how the random number generator must be tested. + // More or less agreed-upon items are: + // * The test must be 100% deterministic, i.e. must not fail, even with a very low probability. + // Otherwise it will bother the unrelated CI builds/test-runs, that are done a lot. + // * The test must not be platform-, compiler-, etc. dependent. + // Otherwise it can break upon migration to a newer compiler, OS update, new OS added to the test runs, etc. + // + // The code below is platform-dependent, can also depend on the compiler version. + // Commenting out for now. 
+ + // #ifdef __APPLE__ + // std::vector expectedLargeNumbers( { + // -2160833387943730151 /*0xe2032d7b74cf6419*/, 7375078072468444798 /*0x66598a6e9c41167e*/, 7708428399011769513 /*0x6af9d6c9b3a12ca9*/, + // -8929332642100591101 /*0x8414a3458a4b4603*/, 9131959130339861073 /*0x7ebb3c72234ae251*/, 2129461186021157660 /*0x1d8d5daa93c5eb1c*/, + // -4466415676527644493 /*0xc2041a9b355570b3*/, 2654403080104352464 /*0x24d65589a8529ad0*/, 3948910203829515833 /*0x36cd58e87d2c7639*/, + // 3600951923571138577 /*0x31f926b221b7a011*/, -7454003569285620820 /*0x988e0f3f2a3c9bac*/, -2896776822558058671 /*0xd7cc94d3e1d79751*/, + // -2510694579170103717 /*0xdd2838951d112e5b*/, -8679035075952589054 /*0x878ddf94f8b0c702*/, -8480296875123573728 /*0x8a4feeec30677c20*/, + + // -8613430109842542716 /*0x8876f2f3752e6f84*/, 2140032717197149199 /*0x1db2ec6afc40040f*/, -917262003397267527 /*0xf3453b11598aefb9*/, + // -3734430349428794203 /*0xcc2ca3620fdbdca5*/, 5134567830016493736 /*0x4741a63cbf4808a8*/, -8243723698983337761 /*0x8d9868f90fa100df*/, + // 5560736588152128922 /*0x4d2bb4770253459a*/, 50526560201835791 /* 0xb381a3888d850f*/, 1288735234894005209 /*0x11e281de3d6303d9*/, + // 3656101241126025060 /*0x32bd14b53c2e7764*/, 872395409727236160 /* 0xc1b5f00c4792840*/, 7628415731883617240 /*0x69dd93b0e9ecabd8*/, + // -1986081594003691539 /*0xe47005501e77e7ed*/, 7532118334194327900 /*0x688775b7d3dc215c*/, -4186893097968929306 /*0xc5e52ae91706f5e6*/ + // } ); + // std::vector expectedSmallNumbers( { 1, 4, 2, 4, 5, -2, -4, 9, 4, -4, -9, 9, -1, 5, 7, + // -8, 0, 2, 5, 0, -1, 1, 9, -3, 5, -8, -9, 6, -1, 6 } ); + // #else + // std::vector expectedLargeNumbers( { + // -5906760355100746824 /*0xae06f8c09cdc1bb8*/, -5720189720460620649 /*0xb09dcdc1901a8c97*/, -439612500227010677 /*0xf9e62ec29d25cf8b*/, + // -4480907261563067469 /*0xc1d09e962310a3b3*/, 8861952245290091527 /*0x7afbfa9d4cfdc407*/, 8955350353842143311 /*0x7c47cbb307ee004f*/, + // -6280323296958344769 /*0xa8d7cf3c6a3011bf*/, 
3137151747734999458 /*0x2b8966e4aa3d91a2*/, 4939508655077151009 /*0x448ca8f37ed75121*/, + // 6238374286314258160 /*0x5693285c6fb13ef0*/, -6040247118112373857 /*0xac2cbb3fa955c39f*/, -6824740380414679031 /*0xa149a6c8751e6809*/, + // -3380739839894412592 /*0xd11533070d1522d0*/, 7062538648911045657 /*0x62032d7b74cf6419*/, -1848293964386331010 /*0xe6598a6e9c41167e*/, + + // -1514943637843006295 /*0xeaf9d6c9b3a12ca9*/, 294039394754184707 /* 0x414a3458a4b4603*/, -91412906514914735 /*0xfebb3c72234ae251*/, + // -7093910850833618148 /*0x9d8d5daa93c5eb1c*/, 4756956360327131315 /*0x42041a9b355570b3*/, -6568968956750423344 /*0xa4d65589a8529ad0*/, + // -5274461833025259975 /*0xb6cd58e87d2c7639*/, -5622420113283637231 /*0xb1f926b221b7a011*/, 1769368467569154988 /*0x188e0f3f2a3c9bac*/, + // 6326595214296717137 /*0x57cc94d3e1d79751*/, 6712677457684672091 /*0x5d2838951d112e5b*/, 544336960902186754 /* 0x78ddf94f8b0c702*/, + // 743075161731202080 /* 0xa4feeec30677c20*/, 609941927012233092 /* 0x876f2f3752e6f84*/, -7083339319657626609 /*0x9db2ec6afc40040f*/ + // } ); + // #ifdef _WIN32 + // std::vector expectedSmallNumbers( { -7, 7, 0, -4, 9, -8, -6, 2, 10, -2, -2, 5, -6, 7, -6, + // -8, -7, 7, 1, 0, -7, -4, -4, -5, 9, 6, 9, 8, 0, 10 } ); + // #else + // std::vector expectedSmallNumbers( { -7, 10, -10, 2, 1, -9, 3, -2, 7, 9, -2, 3, 9, -3, -5, + // -1, 6, 4, 1, -4, -7, 2, 1, -7, -7, -10, 1, 4, -2, 9 } ); + // #endif // #ifdef _WIN32 + // #endif // #ifdef __APPLE__ + + // // Use const seed (and 100%-predictable sequence of pseudo-random numbers): + // Quantum::Qis::Internal::RandomizeSeed(false); + + // size_t times = 30; + // std::vector actualNumbers; + // // Get the actual pseudo-random numbers: + // actualNumbers.reserve(times); + // while (times--) + // { + // actualNumbers.emplace_back(Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(-10, 10)); + // } + + // // Compare the actual numbers with the expected ones: + // for (auto iterExp = expectedSmallNumbers.begin(), 
iterAct = actualNumbers.begin(); + // iterExp != expectedSmallNumbers.end(); ++iterExp, ++iterAct) + // { + // REQUIRE(*iterExp == *iterAct); + // } + + + // // Repeat for large numbers: + // times = 30; + // actualNumbers.clear(); + // while (times--) + // { + // actualNumbers.emplace_back( + // Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(std::numeric_limits::min(), + // std::numeric_limits::max())); + // } + + // for (auto iterExp = expectedLargeNumbers.begin(), iterAct = actualNumbers.begin(); + // iterExp != expectedLargeNumbers.end(); ++iterExp, ++iterAct) + // { + // REQUIRE(*iterExp == *iterAct); + // } + +} // TEST_CASE("QIR: Math.DrawRandomInt", "[qir.math][qir.Math.DrawRandomInt]") diff --git a/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp b/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp new file mode 100644 index 00000000000..8456be356b0 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp @@ -0,0 +1,52 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+#include +#include + +#include "catch.hpp" + +#include "qirTypes.hpp" +#include "quantum__qis_internal.hpp" + +extern "C" void Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(void*); // NOLINT + + +// https://stackoverflow.com/a/5419388/6362941 +// https://github.com/microsoft/qsharp-runtime/pull/511#discussion_r574170031 +// https://github.com/microsoft/qsharp-runtime/pull/511#discussion_r574194191 +struct OstreamRedirectorScoped +{ + OstreamRedirectorScoped(std::ostream& newOstream) + : old(Quantum::Qis::Internal::SetOutputStream(newOstream)) + {} + + ~OstreamRedirectorScoped() + { + Quantum::Qis::Internal::SetOutputStream(old); + } + + private: + std::ostream& old; +}; + + +TEST_CASE("QIR: Out.Message", "[qir.Out][qir.Out.Message]") +{ + const std::string testStr1 = "Test String 1"; + const std::string testStr2 = "Test String 2"; + + std::ostringstream outStrStream; + + { + OstreamRedirectorScoped qOStreamRedirector(outStrStream); // Redirect the output from std::cout to outStrStream. + + // Log something (to the redirected output): + QirString qstr{std::string(testStr1)}; + Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(&qstr); + qstr.str = testStr2; + Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(&qstr); + + } // Recover the output stream. 
+ + REQUIRE(outStrStream.str() == (testStr1 + "\n" + testStr2 + "\n")); +} diff --git a/src/QirRuntime/test/QIR-static/qsharp/Math.qs b/src/QirRuntime/test/QIR-static/qsharp/Math.qs index 12e98bd5458..23d81bcbe09 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/Math.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/Math.qs @@ -28,4 +28,4 @@ namespace Microsoft.Quantum.Intrinsic { body intrinsic; } -} \ No newline at end of file +} diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs index f46e7a06bc4..10154ba9a02 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs @@ -1,9 +1,10 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. namespace Microsoft.Quantum.Testing.QIR { open Microsoft.Quantum.Testing.QIR.Math; open Microsoft.Quantum.Testing.QIR.Str; + open Microsoft.Quantum.Testing.QIR.Out; @EntryPoint() operation Test_Arrays(array : Int[], index : Int, val : Int, compilerDecoy : Bool) : Int { @@ -40,6 +41,8 @@ namespace Microsoft.Quantum.Testing.QIR { let res5 = LogTest(); let res6 = ArcTan2Test(); let res7 = PauliToStringTest(); + let res8 = TestDrawRandomInt(0, 1); + MessageTest("Test"); } return sum; } diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs index bdca70c30b9..2ad876a087c 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs @@ -5,6 +5,7 @@ namespace Microsoft.Quantum.Testing.QIR.Math { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Math; // E() + open Microsoft.Quantum.Random; function SqrtTest() : Int { if 2.0 != Sqrt( 4.0) { return 1; } // The return value indicates which test case has failed. 
@@ -60,5 +61,9 @@ namespace Microsoft.Quantum.Testing.QIR.Math { return 0; } + operation TestDrawRandomInt(min : Int, max : Int) : Int { + return DrawRandomInt(min, max); + } + } diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-output.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-output.qs new file mode 100644 index 00000000000..17efad96151 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-output.qs @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.QIR.Out { + + open Microsoft.Quantum.Intrinsic; + + function MessageTest(msg: String) : Unit { + Message(msg); + } + +} // namespace Microsoft.Quantum.Testing.QIR.Out + diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs index 837f09f6286..05e63538699 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-partials.qs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
namespace Microsoft.Quantum.Testing.QIR { From df59efb7e11fc62439c777a85d0da95db4604d68 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Thu, 18 Feb 2021 13:39:43 -0800 Subject: [PATCH 09/30] Cleanup qdk.dll build (#524) Cleanup qdk.dll build plus moved a few QIR headers into public and fixed includes to use headers rather than <*.h> --- src/QirRuntime/lib/CMakeLists.txt | 26 +- src/QirRuntime/lib/QIR/arrays.cpp | 8 +- src/QirRuntime/lib/QIR/callables.cpp | 8 +- src/QirRuntime/lib/QIR/context.cpp | 4 +- src/QirRuntime/lib/QIR/delegated.cpp | 24 +- src/QirRuntime/lib/QIR/intrinsics.cpp | 6 +- src/QirRuntime/lib/QIR/intrinsicsOut.cpp | 2 +- src/QirRuntime/lib/QIR/quantum__rt.hpp | 2 +- src/QirRuntime/lib/QIR/strings.cpp | 4 +- src/QirRuntime/lib/QIR/utils.cpp | 4 +- .../lib/Simulators/FullstateSimulator.cpp | 2 +- .../lib/Simulators/ToffoliSimulator.cpp | 2 +- src/QirRuntime/lib/qdk/CMakeLists.txt | 28 + src/QirRuntime/lib/qdk/qdk.cpp | 18 + .../QIR/context.hpp => public/QirContext.hpp} | 0 .../QIR/qirTypes.hpp => public/QirTypes.hpp} | 0 .../FullstateSimulatorTests.cpp | 2 +- src/QirRuntime/test/QIR-static/qir-driver.cpp | 6 +- src/QirRuntime/test/QIR-static/qir-gen.ll | 1690 ----------------- .../test/QIR-static/qir-test-ouput.cpp | 2 +- .../test/unittests/QirRuntimeTests.cpp | 6 +- 21 files changed, 82 insertions(+), 1762 deletions(-) create mode 100644 src/QirRuntime/lib/qdk/CMakeLists.txt create mode 100644 src/QirRuntime/lib/qdk/qdk.cpp rename src/QirRuntime/{lib/QIR/context.hpp => public/QirContext.hpp} (100%) rename src/QirRuntime/{lib/QIR/qirTypes.hpp => public/QirTypes.hpp} (100%) delete mode 100644 src/QirRuntime/test/QIR-static/qir-gen.ll diff --git a/src/QirRuntime/lib/CMakeLists.txt b/src/QirRuntime/lib/CMakeLists.txt index 3589d6b764e..789d0ba7503 100644 --- a/src/QirRuntime/lib/CMakeLists.txt +++ b/src/QirRuntime/lib/CMakeLists.txt @@ -1,27 +1,3 @@ add_subdirectory(QIR) add_subdirectory(Simulators) - 
- -#=============================================================================== -# Produce the qdk dynamic library -# -add_library(qdk SHARED - $ - $ - $ -) - -target_link_libraries(qdk - ${QIR_BRIDGE_UTILITY_LIB} - ${QIR_BRIDGE_QIS_UTILITY_LIB} - ${CMAKE_DL_LIBS} -) - -set_property(TARGET qdk PROPERTY POSITION_INDEPENDENT_CODE ON) - -install(TARGETS qdk - RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin" - LIBRARY DESTINATION "${CMAKE_BINARY_DIR}/bin" -) - - +add_subdirectory(qdk) diff --git a/src/QirRuntime/lib/QIR/arrays.cpp b/src/QirRuntime/lib/QIR/arrays.cpp index b7cd195a8ed..04532fa55ce 100644 --- a/src/QirRuntime/lib/QIR/arrays.cpp +++ b/src/QirRuntime/lib/QIR/arrays.cpp @@ -2,17 +2,17 @@ // Licensed under the MIT License. #include -#include +#include +#include // for memcpy #include #include #include -#include // for memcpy #include #include "CoreTypes.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" #include "allocationsTracker.hpp" -#include "context.hpp" -#include "qirTypes.hpp" #include "quantum__rt.hpp" using namespace Microsoft::Quantum; diff --git a/src/QirRuntime/lib/QIR/callables.cpp b/src/QirRuntime/lib/QIR/callables.cpp index 752f109e810..b1fb38cc51c 100644 --- a/src/QirRuntime/lib/QIR/callables.cpp +++ b/src/QirRuntime/lib/QIR/callables.cpp @@ -1,16 +1,16 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include +#include +#include // for memcpy #include #include #include -#include // for memcpy #include +#include "QirContext.hpp" +#include "QirTypes.hpp" #include "allocationsTracker.hpp" -#include "context.hpp" -#include "qirTypes.hpp" #include "quantum__rt.hpp" using namespace Microsoft::Quantum; diff --git a/src/QirRuntime/lib/QIR/context.cpp b/src/QirRuntime/lib/QIR/context.cpp index 531c9157ecf..5456b112fa6 100644 --- a/src/QirRuntime/lib/QIR/context.cpp +++ b/src/QirRuntime/lib/QIR/context.cpp @@ -1,9 +1,9 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include +#include -#include "context.hpp" +#include "QirContext.hpp" #include "CoreTypes.hpp" #include "QuantumApi_I.hpp" diff --git a/src/QirRuntime/lib/QIR/delegated.cpp b/src/QirRuntime/lib/QIR/delegated.cpp index f3d82253f3c..129e6a615ee 100644 --- a/src/QirRuntime/lib/QIR/delegated.cpp +++ b/src/QirRuntime/lib/QIR/delegated.cpp @@ -1,31 +1,19 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -/*============================================================================= - QIR assumes a single global execution context. - To support the dispatch over the qir-bridge, the clients must implement - Microsoft::Quantum::IQuantumGateSet* g_qapi; -=============================================================================*/ -#include +#include #include #include "quantum__rt.hpp" #include "QuantumApi_I.hpp" #include "SimFactory.hpp" -#include "context.hpp" -#include "qirTypes.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" -#ifdef _WIN32 -#define EXPORTAPI extern "C" __declspec(dllexport) -#else -#define EXPORTAPI extern "C" -#endif -EXPORTAPI void SetupQirToRunOnFullStateSimulator() -{ - // Leak the simulator, because the QIR only creates one and it will exist for the duration of the session - InitializeQirContext(Microsoft::Quantum::CreateFullstateSimulator().release(), false /*trackAllocatedObjects*/); -} +/*============================================================================= + Note: QIR assumes a single global execution context! +=============================================================================*/ // QIR specification requires the Result type to be reference counted, even though Results are created by the target and // qubits, created by the same target, aren't reference counted. 
To minimize the implementation burden on the target, diff --git a/src/QirRuntime/lib/QIR/intrinsics.cpp b/src/QirRuntime/lib/QIR/intrinsics.cpp index d2ebb99eb2c..4e20bc4d0b5 100644 --- a/src/QirRuntime/lib/QIR/intrinsics.cpp +++ b/src/QirRuntime/lib/QIR/intrinsics.cpp @@ -6,14 +6,14 @@ To support the dispatch over the qir-bridge, the clients must register their Microsoft::Quantum::ISimulator* first. =============================================================================*/ -#include +#include #include #include "quantum__qis.hpp" #include "QuantumApi_I.hpp" -#include "context.hpp" -#include "qirTypes.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" // Pauli consts are {i2} in QIR, likely stored as {i8} in arrays, but we are using the standard C++ enum type based on // {i32} so cannot pass through the buffer and have to allocate a new one instead and copy. diff --git a/src/QirRuntime/lib/QIR/intrinsicsOut.cpp b/src/QirRuntime/lib/QIR/intrinsicsOut.cpp index 6b46289be10..17a48cec660 100644 --- a/src/QirRuntime/lib/QIR/intrinsicsOut.cpp +++ b/src/QirRuntime/lib/QIR/intrinsicsOut.cpp @@ -3,7 +3,7 @@ #include -#include "qirTypes.hpp" +#include "QirTypes.hpp" #include "quantum__qis.hpp" // Forward declarations: diff --git a/src/QirRuntime/lib/QIR/quantum__rt.hpp b/src/QirRuntime/lib/QIR/quantum__rt.hpp index a3e26a05c8f..f0d24001f19 100644 --- a/src/QirRuntime/lib/QIR/quantum__rt.hpp +++ b/src/QirRuntime/lib/QIR/quantum__rt.hpp @@ -7,7 +7,7 @@ #include // for va_list #include "CoreTypes.hpp" -#include "qirTypes.hpp" +#include "QirTypes.hpp" struct QirArray; struct QirCallable; diff --git a/src/QirRuntime/lib/QIR/strings.cpp b/src/QirRuntime/lib/QIR/strings.cpp index 90679a5dc1f..5ecbb8400ff 100644 --- a/src/QirRuntime/lib/QIR/strings.cpp +++ b/src/QirRuntime/lib/QIR/strings.cpp @@ -1,13 +1,13 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include +#include #include #include #include #include -#include "qirTypes.hpp" +#include "QirTypes.hpp" #include "quantum__rt.hpp" std::unordered_map& AllocatedStrings() diff --git a/src/QirRuntime/lib/QIR/utils.cpp b/src/QirRuntime/lib/QIR/utils.cpp index 8ae3e7b98df..36195562ae3 100644 --- a/src/QirRuntime/lib/QIR/utils.cpp +++ b/src/QirRuntime/lib/QIR/utils.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include +#include #include #include #include @@ -9,7 +9,7 @@ #include "quantum__rt.hpp" -#include "qirTypes.hpp" +#include "QirTypes.hpp" std::unordered_set& UseMemoryTracker() { diff --git a/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp b/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp index d4d016da4e0..60355b78b72 100644 --- a/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp +++ b/src/QirRuntime/lib/Simulators/FullstateSimulator.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -#include +#include #include #include #include diff --git a/src/QirRuntime/lib/Simulators/ToffoliSimulator.cpp b/src/QirRuntime/lib/Simulators/ToffoliSimulator.cpp index 166df66e3f5..59a03c8a0db 100644 --- a/src/QirRuntime/lib/Simulators/ToffoliSimulator.cpp +++ b/src/QirRuntime/lib/Simulators/ToffoliSimulator.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-#include +#include #include #include "QuantumApi_I.hpp" diff --git a/src/QirRuntime/lib/qdk/CMakeLists.txt b/src/QirRuntime/lib/qdk/CMakeLists.txt new file mode 100644 index 00000000000..45a4709da06 --- /dev/null +++ b/src/QirRuntime/lib/qdk/CMakeLists.txt @@ -0,0 +1,28 @@ +#=============================================================================== +# Produce the qdk dynamic library +# +add_library(qdk SHARED + qdk.cpp + $ + $ + $ +) + +target_link_libraries(qdk + ${QIR_BRIDGE_UTILITY_LIB} + ${QIR_BRIDGE_QIS_UTILITY_LIB} + ${CMAKE_DL_LIBS} +) + +target_include_directories(qdk PUBLIC + ${public_includes} +) + +set_property(TARGET qdk PROPERTY POSITION_INDEPENDENT_CODE ON) + +install(TARGETS qdk + RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin" + LIBRARY DESTINATION "${CMAKE_BINARY_DIR}/bin" +) + + diff --git a/src/QirRuntime/lib/qdk/qdk.cpp b/src/QirRuntime/lib/qdk/qdk.cpp new file mode 100644 index 00000000000..41d1c13088b --- /dev/null +++ b/src/QirRuntime/lib/qdk/qdk.cpp @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include "QuantumApi_I.hpp" +#include "SimFactory.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" + +#ifdef _WIN32 +#define EXPORTAPI extern "C" __declspec(dllexport) +#else +#define EXPORTAPI extern "C" +#endif +EXPORTAPI void SetupQirToRunOnFullStateSimulator() +{ + // Leak the simulator, because the QIR only creates one and it will exist for the duration of the session + InitializeQirContext(Microsoft::Quantum::CreateFullstateSimulator().release(), false /*trackAllocatedObjects*/); +} \ No newline at end of file diff --git a/src/QirRuntime/lib/QIR/context.hpp b/src/QirRuntime/public/QirContext.hpp similarity index 100% rename from src/QirRuntime/lib/QIR/context.hpp rename to src/QirRuntime/public/QirContext.hpp diff --git a/src/QirRuntime/lib/QIR/qirTypes.hpp b/src/QirRuntime/public/QirTypes.hpp similarity index 100% rename from src/QirRuntime/lib/QIR/qirTypes.hpp rename to src/QirRuntime/public/QirTypes.hpp diff --git a/src/QirRuntime/test/FullstateSimulator/FullstateSimulatorTests.cpp b/src/QirRuntime/test/FullstateSimulator/FullstateSimulatorTests.cpp index 02f8a2d7926..477be7ce06a 100644 --- a/src/QirRuntime/test/FullstateSimulator/FullstateSimulatorTests.cpp +++ b/src/QirRuntime/test/FullstateSimulator/FullstateSimulatorTests.cpp @@ -10,7 +10,7 @@ #include "QuantumApi_I.hpp" #include "SimFactory.hpp" -#include "context.hpp" +#include "QirContext.hpp" using namespace Microsoft::Quantum; using namespace std; diff --git a/src/QirRuntime/test/QIR-static/qir-driver.cpp b/src/QirRuntime/test/QIR-static/qir-driver.cpp index 1c143af5d0a..3015db659bb 100644 --- a/src/QirRuntime/test/QIR-static/qir-driver.cpp +++ b/src/QirRuntime/test/QIR-static/qir-driver.cpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include +#include #include #include #include @@ -12,8 +12,8 @@ #include "QuantumApi_I.hpp" #include "SimFactory.hpp" #include "SimulatorStub.hpp" -#include "context.hpp" -#include "qirTypes.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" #include "quantum__rt.hpp" #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file diff --git a/src/QirRuntime/test/QIR-static/qir-gen.ll b/src/QirRuntime/test/QIR-static/qir-gen.ll deleted file mode 100644 index f4edf3a2b5a..00000000000 --- a/src/QirRuntime/test/QIR-static/qir-gen.ll +++ /dev/null @@ -1,1690 +0,0 @@ -%Result = type opaque -%Range = type { i64, i64, i64 } -%Tuple = type opaque -%Callable = type opaque -%Qubit = type opaque -%String = type opaque -%Array = type opaque - -@ResultZero = external global %Result* -@ResultOne = external global %Result* -@PauliI = constant i2 0 -@PauliX = constant i2 1 -@PauliY = constant i2 -1 -@PauliZ = constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] -@PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] -@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void 
(%Tuple*, i64)* @MemoryManagement__1__AliasCount] -@0 = internal constant [14 x i8] c"error code: 1\00" -@1 = internal constant [14 x i8] c"error code: 2\00" -@2 = internal constant [14 x i8] c"error code: 3\00" -@3 = internal constant [14 x i8] c"error code: 2\00" -@4 = internal constant [14 x i8] c"error code: 5\00" -@5 = internal constant [14 x i8] c"error code: 6\00" -@6 = internal constant [14 x i8] c"error code: 7\00" -@7 = internal constant [5 x i8] c"Test\00" -@8 = internal constant [30 x i8] c"Unexpected measurement result\00" -@Microsoft__Quantum__Testing__QIR__Subtract = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] -@9 = internal constant [20 x i8] c"Pauli value: PauliI\00" -@10 = internal constant [14 x i8] c"Pauli value: \00" -@11 = internal constant [7 x i8] c"PauliX\00" -@12 = internal constant [7 x i8] c"PauliY\00" -@13 = internal constant [7 x i8] c"PauliZ\00" - -define void @Microsoft__Quantum__Testing__QIR__TestControlled__body() { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %Callable*, i64 }, { 
%Callable*, i64 }* %1, i32 0, i32 1 - %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Qop, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) - store %Callable* %4, %Callable** %2 - store i64 1, i64* %3 - %qop = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %0) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 1) - %adj_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 1) - %ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 1) - %adj_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %adj_ctl_qop) - call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 1) - 
%ctl_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 1) - %q1 = call %Qubit* @__quantum__rt__qubit_allocate() - %q2 = call %Qubit* @__quantum__rt__qubit_allocate() - %q3 = call %Qubit* @__quantum__rt__qubit_allocate() - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %6 = bitcast %Tuple* %5 to { %Qubit* }* - %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 - store %Qubit* %q1, %Qubit** %7 - call void @__quantum__rt__callable_invoke(%Callable* %qop, %Tuple* %5, %Tuple* null) - %8 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1) - %9 = load %Result*, %Result** @ResultOne - %10 = call i1 @__quantum__rt__result_equal(%Result* %8, %Result* %9) - %11 = xor i1 %10, true - br i1 %11, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - %12 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @0, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %12) - unreachable - -continue__1: ; preds = %entry - %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %14 = bitcast %Tuple* %13 to { %Qubit* }* - %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 - store %Qubit* %q2, %Qubit** %15 - call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %13, %Tuple* null) - %16 = call %Result* 
@Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) - %17 = load %Result*, %Result** @ResultOne - %18 = call i1 @__quantum__rt__result_equal(%Result* %16, %Result* %17) - %19 = xor i1 %18, true - br i1 %19, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - %20 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %20) - unreachable - -continue__2: ; preds = %continue__1 - %21 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %22 = bitcast %Tuple* %21 to { %Array*, %Qubit* }* - %23 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 0 - %24 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 1 - %25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) - %27 = bitcast i8* %26 to %Qubit** - store %Qubit* %q1, %Qubit** %27 - store %Array* %25, %Array** %23 - store %Qubit* %q3, %Qubit** %24 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %21, %Tuple* null) - %28 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %29 = load %Result*, %Result** @ResultOne - %30 = call i1 @__quantum__rt__result_equal(%Result* %28, %Result* %29) - %31 = xor i1 %30, true - br i1 %31, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - %32 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, 
i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %32) - unreachable - -continue__3: ; preds = %continue__2 - %33 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %34 = bitcast %Tuple* %33 to { %Array*, %Qubit* }* - %35 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 0 - %36 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 1 - %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) - %39 = bitcast i8* %38 to %Qubit** - store %Qubit* %q2, %Qubit** %39 - store %Array* %37, %Array** %35 - store %Qubit* %q3, %Qubit** %36 - call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, %Tuple* %33, %Tuple* null) - %40 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %41 = load %Result*, %Result** @ResultZero - %42 = call i1 @__quantum__rt__result_equal(%Result* %40, %Result* %41) - %43 = xor i1 %42, true - br i1 %43, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - %44 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @3, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - 
call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %44) - unreachable - -continue__4: ; preds = %continue__3 - %45 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %46 = bitcast %Tuple* %45 to { %Array*, { %Array*, %Qubit* }* }* - %47 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 0 - %48 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 1 - %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) - %51 = bitcast i8* %50 to %Qubit** - store %Qubit* %q1, %Qubit** %51 - %52 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %53 = bitcast %Tuple* %52 to { %Array*, %Qubit* }* - %54 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 0 - %55 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 1 - %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) - %58 = bitcast i8* %57 to %Qubit** - store %Qubit* %q2, %Qubit** %58 - store 
%Array* %56, %Array** %54 - store %Qubit* %q3, %Qubit** %55 - store %Array* %49, %Array** %47 - store { %Array*, %Qubit* }* %53, { %Array*, %Qubit* }** %48 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %45, %Tuple* null) - %59 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %60 = load %Result*, %Result** @ResultOne - %61 = call i1 @__quantum__rt__result_equal(%Result* %59, %Result* %60) - %62 = xor i1 %61, true - br i1 %62, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - %63 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @4, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void 
@__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %63) - unreachable - -continue__5: ; preds = %continue__4 - %64 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %64) - %65 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %66 = bitcast %Tuple* %65 to { %Array*, %Qubit* }* - %67 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 0 - %68 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 1 - %69 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) - %71 = bitcast i8* %70 to %Qubit** - %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) - %73 = bitcast i8* %72 to %Qubit** - store %Qubit* %q1, %Qubit** %71 - store %Qubit* %q2, %Qubit** %73 - store %Array* %69, %Array** %67 - store %Qubit* %q3, %Qubit** %68 - call void @__quantum__rt__callable_invoke(%Callable* %64, %Tuple* %65, %Tuple* null) - %74 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %75 = load %Result*, %Result** @ResultZero - %76 = call i1 @__quantum__rt__result_equal(%Result* %74, %Result* %75) - %77 = xor i1 %76, true - br i1 %77, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - %78 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @5, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void 
@__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %78) - unreachable - -continue__6: ; preds = %continue__5 - %q4 = call %Qubit* @__quantum__rt__qubit_allocate() - %79 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %79) - %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %81 = bitcast %Tuple* %80 to { %Qubit* }* - %82 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %81, i32 0, i32 0 - store %Qubit* %q3, %Qubit** %82 - call void @__quantum__rt__callable_invoke(%Callable* %79, %Tuple* %80, %Tuple* null) - %83 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false) - call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %83) - call void @__quantum__rt__callable_make_adjoint(%Callable* %83) - %84 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %85 = bitcast %Tuple* %84 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* - %86 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 0 - %87 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 1 - %88 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) - %90 = bitcast i8* %89 to %Qubit** - store %Qubit* %q1, %Qubit** %90 - %91 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %92 = bitcast %Tuple* %91 to { %Array*, { %Array*, %Qubit* }* }* - %93 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 0 - %94 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 1 - %95 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0) - %97 = bitcast i8* %96 to %Qubit** - store %Qubit* %q2, %Qubit** %97 - %98 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %99 = bitcast %Tuple* %98 to { %Array*, %Qubit* }* - %100 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 0 - %101 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 1 - %102 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 1) - %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 0) - %104 = bitcast i8* %103 to %Qubit** - store %Qubit* %q3, %Qubit** %104 - store %Array* %102, %Array** %100 - store %Qubit* %q4, %Qubit** %101 - store %Array* %95, %Array** %93 - store { %Array*, %Qubit* }* %99, { %Array*, %Qubit* }** %94 - store %Array* %88, %Array** %86 - store { %Array*, { %Array*, %Qubit* }* }* %92, { %Array*, { %Array*, %Qubit* }* }** %87 - call void @__quantum__rt__callable_invoke(%Callable* %83, %Tuple* %84, %Tuple* null) - %105 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4) - %106 = load %Result*, %Result** @ResultOne - %107 = call i1 @__quantum__rt__result_equal(%Result* %105, %Result* %106) - %108 = xor i1 %107, true - br i1 %108, label %then0__7, label %continue__7 - -then0__7: ; preds = %continue__6 - %109 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @6, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q4) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 
1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void 
@__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %109) - unreachable - -continue__7: ; preds = %continue__6 - call void @__quantum__rt__qubit_release(%Qubit* %q4) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - ret void -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -define void @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %3 = load %Qubit*, %Qubit** %1 - %4 = load i64, i64* %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %3, i64 %4) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %3 = load %Qubit*, %Qubit** %1 - %4 = load i64, i64* %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %3, i64 %4) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* - %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 - call void 
@Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %3, { %Qubit*, i64 }* %4) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* - %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 - call void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %3, { %Qubit*, i64 }* %4) - ret void -} - -declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i64)*]*, %Tuple*) - -define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1 - %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 - %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* - %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 - store %Qubit* %2, %Qubit** %8 - store i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 - %11 = load %Callable*, %Callable** %10 - call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) - ret void -} - -define void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* - %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 - %2 = load %Qubit*, %Qubit** %1 - %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 - %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* - %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 - store %Qubit* %2, %Qubit** %8 - store i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 - %11 = load %Callable*, %Callable** %10 - %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %12) - call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i64 -1) - ret void -} - -define void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr 
inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load %Qubit*, %Qubit** %2 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 - %7 = load i64, i64* %6 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* - %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 - store %Qubit* %4, %Qubit** %10 - store i64 %7, i64* %11 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* - %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14 - store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 - %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) - ret void -} - -define void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* - %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1 - %4 = load %Qubit*, %Qubit** %2 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 - %7 = load i64, i64* %6 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* - %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 - store %Qubit* %4, %Qubit** %10 - store i64 %7, i64* %11 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* - %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14 - store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 - %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) - call void 
@__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) - ret void -} - -define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -declare void @__quantum__rt__callable_memory_management(i32, %Callable*, i64) - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i64) - -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) - -declare void 
@__quantum__rt__callable_make_adjoint(%Callable*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare %Qubit* @__quantum__rt__qubit_allocate() - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { -entry: - %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) - %1 = bitcast i8* %0 to i2* - %2 = load i2, i2* @PauliZ - store i2 %2, i2* %1 - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) - %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) - %4 = bitcast i8* %3 to %Qubit** - store %Qubit* %qubit, %Qubit** %4 - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i64 -1) - ret %Result* %5 -} - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) - -declare %String* @__quantum__rt__string_create(i32, i8*) - -declare void @__quantum__rt__qubit_release(%Qubit*) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) - -declare void @__quantum__rt__result_update_reference_count(%Result*, i64) - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) - -declare void @__quantum__rt__fail(%String*) - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i64) - -define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %compilerDecoy) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) - %local = alloca %Array* - store %Array* %array, %Array** %local - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 1) - call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) - %0 = call %Array* @__quantum__rt__array_copy(%Array* %array, i1 false) - %1 = icmp ne %Array* %array, %0 - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %index) - %3 = bitcast i8* %2 to i64* - store i64 %val, i64* %3 - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 1) - store %Array* %0, %Array** %local - %n = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) - %4 = sub i64 %n, 1 - %5 = load %Range, %Range* @EmptyRange - %6 = insertvalue %Range %5, i64 %index, 0 - %7 = insertvalue %Range %6, i64 1, 1 - %8 = insertvalue %Range %7, i64 %4, 2 - %slice1 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %8, i1 false) - call void @__quantum__rt__array_update_alias_count(%Array* %slice1, i64 1) - %9 = load %Range, %Range* @EmptyRange - %10 = insertvalue %Range %9, i64 %index, 0 - %11 = insertvalue %Range %10, i64 -2, 1 - %12 = insertvalue %Range %11, i64 0, 2 - %slice2 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %12, i1 false) - call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 1) - %result = call %Array* @__quantum__rt__array_concatenate(%Array* %slice2, %Array* %slice1) - call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 1) - %sum = alloca 
i64 - store i64 0, i64* %sum - %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %result) - %14 = sub i64 %13, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %21, %exiting__1 ] - %15 = icmp sle i64 %i, %14 - br i1 %15, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %16 = load i64, i64* %sum - %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %result, i64 %i) - %18 = bitcast i8* %17 to i64* - %19 = load i64, i64* %18 - %20 = add i64 %16, %19 - store i64 %20, i64* %sum - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %21 = add i64 %i, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - br i1 %compilerDecoy, label %then0__1, label %continue__1 - -then0__1: ; preds = %exit__1 - call void @Microsoft__Quantum__Testing__QIR__TestControlled__body() - %res2 = call i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 17, i64 42) - call void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() - %res4 = call i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() - %res5 = call i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() - %res6 = call i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() - %res7 = call i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() - %res8 = call i64 @Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(i64 0, i64 1) - %22 = call %String* @__quantum__rt__string_create(i32 4, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @7, i32 0, i32 0)) - call void @Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(%String* %22) - call void @__quantum__rt__string_update_reference_count(%String* %22, i64 -1) - br label %continue__1 - -continue__1: ; preds = %then0__1, %exit__1 - %23 = load i64, i64* %sum - call void @__quantum__rt__array_update_alias_count(%Array* %array, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %0, i64 -1) - call void 
@__quantum__rt__array_update_alias_count(%Array* %slice1, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %slice2, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %result, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %array, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %slice1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %slice2, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %result, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - ret i64 %23 -} - -declare void @__quantum__rt__array_update_alias_count(%Array*, i64) - -declare %Array* @__quantum__rt__array_copy(%Array*, i1) - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) - -declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) - -define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 - %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) - store %Callable* %4, %Callable** %2 - store i64 %x, i64* %3 - %subtractor = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %0) - call void 
@__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) - %6 = bitcast %Tuple* %5 to { i64 }* - %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 - store i64 %y, i64* %7 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) - call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8) - %9 = bitcast %Tuple* %8 to { i64 }* - %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0 - %11 = load i64, i64* %10 - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %subtractor, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %subtractor, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) - ret i64 %11 -} - -define void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() { -entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %1 = bitcast i8* %0 to %Qubit** - %qubit = load %Qubit*, %Qubit** %1 - call void @__quantum__qis__x__body(%Qubit* %qubit) - %q = call %Qubit* @__quantum__rt__qubit_allocate() - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %3 = bitcast i8* %2 to %Qubit** - %4 = load %Qubit*, %Qubit** %3 - %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4) - %6 = 
load %Result*, %Result** @ResultOne - %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6) - br i1 %7, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__x__body(%Qubit* %q) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9 - %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10) - %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q) - %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12) - br i1 %13, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - %14 = call %String* @__quantum__rt__string_create(i32 29, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @8, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - call void @__quantum__rt__fail(%String* %14) - unreachable - -continue__2: ; preds = %continue__1 - call void @__quantum__rt__qubit_release(%Qubit* %q) - call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - ret 
void -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() { -entry: - %0 = call double @__quantum__qis__sqrt__body(double 4.000000e+00) - %1 = fcmp one double 2.000000e+00, %0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - ret i64 1 - -continue__1: ; preds = %entry - %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00) - %3 = fcmp one double 3.000000e+00, %2 - br i1 %3, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - ret i64 2 - -continue__2: ; preds = %continue__1 - %4 = call double @__quantum__qis__sqrt__body(double 1.000000e+02) - %5 = fcmp one double 1.000000e+01, %4 - br i1 %5, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - ret i64 3 - -continue__3: ; preds = %continue__2 - %d__4 = call double @__quantum__qis__sqrt__body(double -5.000000e+00) - %6 = call i1 @__quantum__qis__isnan__body(double %d__4) - %7 = xor i1 %6, true - br i1 %7, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - ret i64 4 - -continue__4: ; preds = %continue__3 - %d__5 = call double @__quantum__qis__nan__body() - %d__6 = call double @__quantum__qis__sqrt__body(double %d__5) - %8 = call i1 @__quantum__qis__isnan__body(double %d__6) - %9 = xor i1 %8, true - br i1 %9, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - ret i64 5 - -continue__5: ; preds = %continue__4 - %d__7 = call double @__quantum__qis__infinity__body() - %d__8 = call double @__quantum__qis__sqrt__body(double %d__7) - %10 = call i1 @__quantum__qis__isinf__body(double %d__8) - %11 = xor i1 %10, true - br i1 %11, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - ret i64 6 - -continue__6: ; preds = %continue__5 - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() { -entry: - %input = call double @Microsoft__Quantum__Math__E__body() - %0 = call double @__quantum__qis__log__body(double %input) - %1 = fcmp one 
double 1.000000e+00, %0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - ret i64 1 - -continue__1: ; preds = %entry - %2 = call double @Microsoft__Quantum__Math__E__body() - %3 = call double @Microsoft__Quantum__Math__E__body() - %input__1 = fmul double %2, %3 - %4 = call double @__quantum__qis__log__body(double %input__1) - %5 = fcmp one double 2.000000e+00, %4 - br i1 %5, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - ret i64 2 - -continue__2: ; preds = %continue__1 - %d = call double @__quantum__qis__log__body(double 0.000000e+00) - %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) - %7 = xor i1 %6, true - br i1 %7, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - ret i64 3 - -continue__3: ; preds = %continue__2 - %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00) - %8 = call i1 @__quantum__qis__isnan__body(double %d__1) - %9 = xor i1 %8, true - br i1 %9, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - ret i64 4 - -continue__4: ; preds = %continue__3 - %input__4 = call double @__quantum__qis__nan__body() - %d__2 = call double @__quantum__qis__log__body(double %input__4) - %10 = call i1 @__quantum__qis__isnan__body(double %d__2) - %11 = xor i1 %10, true - br i1 %11, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - ret i64 5 - -continue__5: ; preds = %continue__4 - %input__5 = call double @__quantum__qis__infinity__body() - %d__3 = call double @__quantum__qis__log__body(double %input__5) - %12 = call i1 @__quantum__qis__isinf__body(double %d__3) - %13 = xor i1 %12, true - br i1 %13, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - ret i64 6 - -continue__6: ; preds = %continue__5 - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() { -entry: - %0 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00) - %1 = 
fcmp one double 0.000000e+00, %0 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - ret i64 1 - -continue__1: ; preds = %entry - %2 = call double @Microsoft__Quantum__Math__PI__body() - %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00) - %4 = fcmp one double %2, %3 - br i1 %4, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - ret i64 2 - -continue__2: ; preds = %continue__1 - %5 = call double @Microsoft__Quantum__Math__PI__body() - %6 = fdiv double %5, 2.000000e+00 - %7 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00) - %8 = fcmp one double %6, %7 - br i1 %8, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - ret i64 3 - -continue__3: ; preds = %continue__2 - %9 = call double @Microsoft__Quantum__Math__PI__body() - %10 = fneg double %9 - %11 = fdiv double %10, 2.000000e+00 - %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00) - %13 = fcmp one double %11, %12 - br i1 %13, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - ret i64 4 - -continue__4: ; preds = %continue__3 - %14 = call double @Microsoft__Quantum__Math__PI__body() - %15 = fdiv double %14, 4.000000e+00 - %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00) - %17 = fcmp one double %15, %16 - br i1 %17, label %then0__5, label %continue__5 - -then0__5: ; preds = %continue__4 - ret i64 5 - -continue__5: ; preds = %continue__4 - %18 = call double @Microsoft__Quantum__Math__PI__body() - %19 = fmul double %18, 3.000000e+00 - %20 = fdiv double %19, 4.000000e+00 - %21 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double -1.000000e+00) - %22 = fcmp one double %20, %21 - br i1 %22, label %then0__6, label %continue__6 - -then0__6: ; preds = %continue__5 - ret i64 6 - -continue__6: ; preds = %continue__5 - %23 = call double 
@Microsoft__Quantum__Math__PI__body() - %24 = fneg double %23 - %25 = fmul double %24, 3.000000e+00 - %26 = fdiv double %25, 4.000000e+00 - %27 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double -1.000000e+00) - %28 = fcmp one double %26, %27 - br i1 %28, label %then0__7, label %continue__7 - -then0__7: ; preds = %continue__6 - ret i64 7 - -continue__7: ; preds = %continue__6 - %29 = call double @Microsoft__Quantum__Math__PI__body() - %30 = fneg double %29 - %31 = fdiv double %30, 4.000000e+00 - %32 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 1.000000e+00) - %33 = fcmp one double %31, %32 - br i1 %33, label %then0__8, label %continue__8 - -then0__8: ; preds = %continue__7 - ret i64 8 - -continue__8: ; preds = %continue__7 - %34 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 0.000000e+00) - %35 = fcmp one double 0.000000e+00, %34 - br i1 %35, label %then0__9, label %continue__9 - -then0__9: ; preds = %continue__8 - ret i64 9 - -continue__9: ; preds = %continue__8 - %y__9 = call double @__quantum__qis__nan__body() - %d = call double @__quantum__qis__arctan2__body(double %y__9, double 0.000000e+00) - %36 = call i1 @__quantum__qis__isnan__body(double %d) - %37 = xor i1 %36, true - br i1 %37, label %then0__10, label %continue__10 - -then0__10: ; preds = %continue__9 - ret i64 11 - -continue__10: ; preds = %continue__9 - %x__10 = call double @__quantum__qis__nan__body() - %d__1 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double %x__10) - %38 = call i1 @__quantum__qis__isnan__body(double %d__1) - %39 = xor i1 %38, true - br i1 %39, label %then0__11, label %continue__11 - -then0__11: ; preds = %continue__10 - ret i64 12 - -continue__11: ; preds = %continue__10 - %y__11 = call double @__quantum__qis__nan__body() - %x__11 = call double @__quantum__qis__nan__body() - %d__2 = call double @__quantum__qis__arctan2__body(double %y__11, double %x__11) - %40 = call i1 
@__quantum__qis__isnan__body(double %d__2) - %41 = xor i1 %40, true - br i1 %41, label %then0__12, label %continue__12 - -then0__12: ; preds = %continue__11 - ret i64 13 - -continue__12: ; preds = %continue__11 - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() { -entry: - %0 = call %String* @__quantum__rt__string_create(i32 19, i8* getelementptr inbounds ([20 x i8], [20 x i8]* @9, i32 0, i32 0)) - %1 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @10, i32 0, i32 0)) - %2 = load i2, i2* @PauliI - %3 = call %String* @__quantum__rt__pauli_to_string(i2 %2) - %4 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %3) - call void @__quantum__rt__string_update_reference_count(%String* %1, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %3, i64 -1) - %5 = call i1 @__quantum__rt__string_equal(%String* %0, %String* %4) - %6 = xor i1 %5, true - br i1 %6, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - ret i64 1 - -continue__1: ; preds = %entry - %7 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) - %8 = load i2, i2* @PauliX - %9 = call %String* @__quantum__rt__pauli_to_string(i2 %8) - %10 = call i1 @__quantum__rt__string_equal(%String* %7, %String* %9) - %11 = xor i1 %10, true - br i1 %11, label %then0__2, label %continue__2 - -then0__2: ; preds = %continue__1 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - ret i64 2 - 
-continue__2: ; preds = %continue__1 - %12 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @12, i32 0, i32 0)) - %13 = load i2, i2* @PauliY - %14 = call %String* @__quantum__rt__pauli_to_string(i2 %13) - %15 = call i1 @__quantum__rt__string_equal(%String* %12, %String* %14) - %16 = xor i1 %15, true - br i1 %16, label %then0__3, label %continue__3 - -then0__3: ; preds = %continue__2 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) - ret i64 3 - -continue__3: ; preds = %continue__2 - %17 = call %String* @__quantum__rt__string_create(i32 6, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @13, i32 0, i32 0)) - %18 = load i2, i2* @PauliZ - %19 = call %String* @__quantum__rt__pauli_to_string(i2 %18) - %20 = call i1 @__quantum__rt__string_equal(%String* %17, %String* %19) - %21 = xor i1 %20, true - br i1 %21, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__3 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %19, i64 
-1) - ret i64 4 - -continue__4: ; preds = %continue__3 - call void @__quantum__rt__string_update_reference_count(%String* %0, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %4, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %7, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %9, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %12, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %14, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %17, i64 -1) - call void @__quantum__rt__string_update_reference_count(%String* %19, i64 -1) - ret i64 0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(i64 %min, i64 %max) { -entry: - %0 = call i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) - ret i64 %0 -} - -define void @Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(%String* %msg) { -entry: - call void @__quantum__qis__message__body(%String* %msg) - ret void -} - -declare void @__quantum__rt__string_update_reference_count(%String*, i64) - -define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { -entry: - %0 = srem i64 %n, 2 - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__k__body(%Qubit* %q) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - ret void -} - -declare void @__quantum__qis__k__body(%Qubit*) - -define void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %q, i64 %n) { -entry: - call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) - ret void -} - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %ctrls, { %Qubit*, i64 }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %q = 
load %Qubit*, %Qubit** %1 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %n = load i64, i64* %2 - %3 = srem i64 %n, 2 - %4 = icmp eq i64 %3, 1 - br i1 %4, label %then0__1, label %continue__1 - -then0__1: ; preds = %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 1) - call void @__quantum__qis__k__ctl(%Array* %ctrls, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) - br label %continue__1 - -continue__1: ; preds = %then0__1, %entry - call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i64 -1) - ret void -} - -declare void @__quantum__qis__k__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %__controlQubits__, { %Qubit*, i64 }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 - %q = load %Qubit*, %Qubit** %1 - %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 - %n = load i64, i64* %2 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) - %4 = bitcast %Tuple* %3 to { %Qubit*, i64 }* - %5 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %4, i32 0, i32 1 - store %Qubit* %q, %Qubit** %5 - store i64 %n, i64* %6 - call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %__controlQubits__, { %Qubit*, i64 }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) - ret void -} - -define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { -entry: - %0 = sub i64 %from, %what - ret i64 %0 -} - -declare void 
@__quantum__qis__x__body(%Qubit*) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* - %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 - %3 = load i64, i64* %1 - %4 = load i64, i64* %2 - %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) - %6 = bitcast %Tuple* %result-tuple to { i64 }* - %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 - store i64 %5, i64* %7 - ret void -} - -define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 - %2 = load i64, i64* %1 - %3 = bitcast %Tuple* %arg-tuple to { i64 }* - %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 - %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) - %7 = bitcast %Tuple* %6 to { i64, i64 }* - %8 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 1 - store i64 %2, i64* %8 - store i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %11 = load %Callable*, %Callable** %10 - call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) - ret void -} - -define void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -define void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1 - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Message__body(%String* %msg) { -entry: - call void @__quantum__qis__message__body(%String* %msg) - ret void -} - -declare void @__quantum__qis__message__body(%String*) - -define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* 
%__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isnegativeinfinity__body(double) - -define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 -} - -declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) - -define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isnan__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isnan__body(double) - -define double @Microsoft__Quantum__Intrinsic__NAN__body() { -entry: - %0 = call double @__quantum__qis__nan__body() - ret double %0 -} - -declare double @__quantum__qis__nan__body() - -define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isinf__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isinf__body(double) - -define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { -entry: - %0 = call double @__quantum__qis__infinity__body() - ret double %0 -} - -declare double @__quantum__qis__infinity__body() - -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { 
-entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { -entry: - call void @__quantum__qis__x__body(%Qubit* %qubit) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare %String* @__quantum__rt__pauli_to_string(i2) - -declare %String* @__quantum__rt__string_concatenate(%String*, %String*) - -declare i1 @__quantum__rt__string_equal(%String*, %String*) - -declare i64 @__quantum__qis__drawrandomint__body(i64, i64) - -define double @Microsoft__Quantum__Math__E__body() { -entry: - ret double 0x4005BF0A8B145769 -} - -declare double @__quantum__qis__log__body(double) - -declare double @__quantum__qis__arctan2__body(double, double) - -define double @Microsoft__Quantum__Math__PI__body() { -entry: - ret double 0x400921FB54442D18 -} - -declare double @__quantum__qis__sqrt__body(double) - -define double @Microsoft__Quantum__Math__Sqrt__body(double %d) { -entry: - %0 = call double @__quantum__qis__sqrt__body(double %d) - ret double %0 -} - -define double @Microsoft__Quantum__Math__Log__body(double %input) { -entry: - %0 = call double @__quantum__qis__log__body(double %input) - ret double %0 -} - -define double 
@Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { -entry: - %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) - ret double %0 -} - -define i64 @Microsoft__Quantum__Random__DrawRandomInt__body(i64 %min, i64 %max) { -entry: - %0 = call i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) - ret i64 %0 -} - -define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays(i64 %array__count, i64* %array, i64 %index, i64 %val, i1 %compilerDecoy) #0 { -entry: - %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %array__count) - %1 = icmp sgt i64 %array__count, 0 - br i1 %1, label %copy, label %next - -copy: ; preds = %entry - %2 = ptrtoint i64* %array to i64 - %3 = sub i64 %array__count, 1 - br label %header__1 - -next: ; preds = %exit__1, %entry - %4 = call i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %0, i64 %index, i64 %val, i1 %compilerDecoy) - call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) - ret i64 %4 - -header__1: ; preds = %exiting__1, %copy - %5 = phi i64 [ 0, %copy ], [ %13, %exiting__1 ] - %6 = icmp sle i64 %5, %3 - br i1 %6, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %7 = mul i64 %5, 8 - %8 = add i64 %2, %7 - %9 = inttoptr i64 %8 to i64* - %10 = load i64, i64* %9 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) - %12 = bitcast i8* %11 to i64* - store i64 %10, i64* %12 - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %13 = add i64 %5, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - br label %next -} - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i64) - -attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp b/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp index 8456be356b0..1f74986dfdf 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp +++ b/src/QirRuntime/test/QIR-static/qir-test-ouput.cpp @@ -5,7 +5,7 @@ #include 
"catch.hpp" -#include "qirTypes.hpp" +#include "QirTypes.hpp" #include "quantum__qis_internal.hpp" extern "C" void Microsoft__Quantum__Testing__QIR__Out__MessageTest__body(void*); // NOLINT diff --git a/src/QirRuntime/test/unittests/QirRuntimeTests.cpp b/src/QirRuntime/test/unittests/QirRuntimeTests.cpp index 03ea4d536a5..2b0037aa4b5 100644 --- a/src/QirRuntime/test/unittests/QirRuntimeTests.cpp +++ b/src/QirRuntime/test/unittests/QirRuntimeTests.cpp @@ -4,18 +4,18 @@ #include "catch.hpp" #include +#include // for memcpy #include -#include // for memcpy #include #include -#include "qirTypes.hpp" +#include "QirTypes.hpp" #include "quantum__qis.hpp" #include "quantum__rt.hpp" #include "BitStates.hpp" +#include "QirContext.hpp" #include "SimulatorStub.hpp" -#include "context.hpp" using namespace Microsoft::Quantum; From 46ea64d7e722d6efe76ebe134ad86626625a9936 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Fri, 19 Feb 2021 19:18:17 -0800 Subject: [PATCH 10/30] QIR-based tracer (#475) Initial implementation of the QIR-based tracer. Done basic layering, output and support for [global] barrier. 
--- src/QirRuntime/CMakeLists.txt | 3 +- src/QirRuntime/lib/CMakeLists.txt | 1 + src/QirRuntime/lib/Tracer/CMakeLists.txt | 20 + src/QirRuntime/lib/Tracer/README.md | 214 +++ .../lib/Tracer/layering_example.png | Bin 0 -> 26209 bytes src/QirRuntime/lib/Tracer/tracer-bridge.ll | 85 + src/QirRuntime/lib/Tracer/tracer-qis.cpp | 66 + src/QirRuntime/lib/Tracer/tracer.cpp | 309 ++++ src/QirRuntime/lib/Tracer/tracer.hpp | 206 +++ src/QirRuntime/public/CoreTypes.hpp | 3 - src/QirRuntime/public/TracerTypes.hpp | 17 + src/QirRuntime/test.py | 135 +- src/QirRuntime/test/CMakeLists.txt | 1 + src/QirRuntime/test/QIR-tracer/CMakeLists.txt | 28 + src/QirRuntime/test/QIR-tracer/generate.py | 41 + .../test/QIR-tracer/qir-tracer-driver.cpp | 47 + .../test/QIR-tracer/tracer-config.cpp | 16 + .../test/QIR-tracer/tracer-config.hpp | 20 + src/QirRuntime/test/QIR-tracer/tracer-core.qs | 30 + .../test/QIR-tracer/tracer-intrinsics.qs | 63 + .../test/QIR-tracer/tracer-measurements.qs | 27 + src/QirRuntime/test/QIR-tracer/tracer-qir.ll | 1462 +++++++++++++++++ .../test/QIR-tracer/tracer-target.qs | 251 +++ src/QirRuntime/test/unittests/CMakeLists.txt | 3 + src/QirRuntime/test/unittests/TracerTests.cpp | 305 ++++ 25 files changed, 3283 insertions(+), 70 deletions(-) create mode 100644 src/QirRuntime/lib/Tracer/CMakeLists.txt create mode 100644 src/QirRuntime/lib/Tracer/README.md create mode 100644 src/QirRuntime/lib/Tracer/layering_example.png create mode 100644 src/QirRuntime/lib/Tracer/tracer-bridge.ll create mode 100644 src/QirRuntime/lib/Tracer/tracer-qis.cpp create mode 100644 src/QirRuntime/lib/Tracer/tracer.cpp create mode 100644 src/QirRuntime/lib/Tracer/tracer.hpp create mode 100644 src/QirRuntime/public/TracerTypes.hpp create mode 100644 src/QirRuntime/test/QIR-tracer/CMakeLists.txt create mode 100644 src/QirRuntime/test/QIR-tracer/generate.py create mode 100644 src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-config.cpp 
create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-config.hpp create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-core.qs create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-measurements.qs create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-qir.ll create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-target.qs create mode 100644 src/QirRuntime/test/unittests/TracerTests.cpp diff --git a/src/QirRuntime/CMakeLists.txt b/src/QirRuntime/CMakeLists.txt index 28ea5e8c349..63221cd4d5b 100644 --- a/src/QirRuntime/CMakeLists.txt +++ b/src/QirRuntime/CMakeLists.txt @@ -114,12 +114,13 @@ endmacro(compile_from_qir) if (WIN32) set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-rt-u.lib") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-qis-u.lib") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/tracer-bridge-u.lib") else() set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-rt-u.a") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-qis-u.a") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/libtracer-bridge-u.a") endif() - add_subdirectory(lib) add_subdirectory(test) diff --git a/src/QirRuntime/lib/CMakeLists.txt b/src/QirRuntime/lib/CMakeLists.txt index 789d0ba7503..ad1361d4074 100644 --- a/src/QirRuntime/lib/CMakeLists.txt +++ b/src/QirRuntime/lib/CMakeLists.txt @@ -1,3 +1,4 @@ add_subdirectory(QIR) add_subdirectory(Simulators) +add_subdirectory(Tracer) add_subdirectory(qdk) diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt new file mode 100644 index 00000000000..78b5adf145b --- /dev/null +++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt @@ -0,0 +1,20 @@ +# build the utility lib for tracer's bridge +compile_from_qir(tracer-bridge tracer-bridge) + +# build the native part of the tracer +set(component_name "tracer") + 
+set(source_files + "tracer-qis.cpp" + "tracer.cpp" +) + +set(includes + "${public_includes}" + "${PROJECT_SOURCE_DIR}/lib/QIR" +) + +add_library(${component_name} STATIC ${source_files}) +target_include_directories(${component_name} PUBLIC ${includes}) + +add_dependencies(${component_name} tracer-bridge) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md new file mode 100644 index 00000000000..3ba42c9ca35 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/README.md @@ -0,0 +1,214 @@ +# Resource Tracer Design Document # + +The purpose of the Resource Tracer is to provide efficient and flexible way to estimate resources of a quantum program + in QIR representation. The estimates are calculated by simulating execution of the program (as opposed to the static + analysis). Please see [Resource Estimator](https://docs.microsoft.com/en-us/azure/quantum/user-guide/machines/resources-estimator) + for more background on resource estimation for quantum programs. + +To run against the tracer, the quantum program should comply with the + [QIR specifications](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR) as well as: + +1. convert _each_ used intrinsic operation into one of the Quantum Instruction Set (_qis_) operations supported by the + tracer (see the last section of this readme); +1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates + would cover only one branch of the execution); +1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated + as inlined as if the whole program consisted of a single operation); +1. (_optional_) provide callbacks for global barriers; +1. (_optional_) provide description of mapping for frame tracking; +1. (_optional_) provide names of operations for output (in the form of `tracer-config.hpp|cpp` files). + +The Resource Tracer will consist of: + +1. 
the bridge for the `__quantum__qis__*` methods listed below; +2. the native implementation to back the `__quantum__qis__*` methods; +3. the logic for partitioning gates into layers; +4. the logic for frame tracking; +5. output of the collected statistics; +6. (_lower priority_) the scheduling component to optimize depth and/or width of the circuit. + +## Layering ## + +One of the goals of the tracer is to compute which of the quantum operations can be executed in parallel. Further in + this section we provide the definitions of used concepts and the description of how we group the operations into + _layers_, however, we hope that the following example of layering is intuitively clear. + +### Example of layering ### + +The diagram below shows an example of how a sequential program, represented by the left circuit, could be layered. The gates in light gray are of duration zero, the preferred layer duration is 1, and the barrier, + represented by a vertical squiggle, is set to have duration 0. + +![layering example](layering_example.png?raw=true "Layering example diagram") + +Notice, that gate 9 is dropped because it cannot cross the barrier to be added into L(2,1). + +### Definitions ### + +Each quantum operation in a program can be assigned an integer value, which we'll call its ___start time___. Some + operations might have non-zero duration, so they will also have ___end time___. For each qubit, there are also times + when the qubit is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits + the gate is using. If two gates or measurements use the same qubit, one of the gates must have start time greater than + or equal to the end time of the other. We'll call a particular assignment of times across a program its ___time function___. 
+ +A sequentially executed quantum program can be assigned a trivial time function, when all quantum operations have + duration of 1 and unique start times, ordered to match the flow of the program. Layering compresses the timeline by + assuming that some operations might be executed simultaneously while allowing for different operations to have various + durations. + +Provided a valid _time_ function for the program a ___layer of duration N at time T, denoted as L(T,N),___ + is a subset of operations in the program such that all of these operations have start time greater or equal _T_ and + finish time less than _T + N_. The program is ___layered___ if all gates in it are partitioned into layers, that don't + overlap in time. The union of all qubits that are involved in operations of a given layer, will be denoted _Qubits(T,N)_. + +A sequential program can be trivially layered such that each layer contains exactly one operation. Notice, that the + definition of layer doesn't require the gates to be executed _in parallel_. For example, all gates in a fully sequential + program can be also placed into a single layer L(0, infinity). Some gates might be considered to be very cheap and take + zero time to execute, those gates can be added to a layer even if they act on the same qubit another gate in this layer + is acting on and have to be executed sequentially within the layer. + +### The Resource Tracer's Layering Algorithm ### + +As the tracer is executing a sequential quantum program, it will compute a time function and corresponding layering + using the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might + be done differently, as long as the resulting layering is the same as if running the conceptual algorithm. + +A ___barrier___ is a layer that acts as if it was containing all currently allocated qubits and no operation can be added + into it. 
+ +A user can inject _barriers_ by calling `__quantum__qis__global_barrier` function. The user can choose duration of + a barrier which would affect start time of the following layers but no operations will be added to a barrier, + independent of its duration. + +__Conditional execution on measurement results__: The Tracer will execute LLVM IR's branching structures "as is", + depending on the values of the corresponding variables at runtime. To enable estimation of branches that depend on a + measurement result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the + conditionals into corresponding callbacks to the tracer. The tracer will add operations from _both branches_ into the + layers it creates to compute the upper bound estimate. + +The following operations are _not_ supported inside conditional callbacks and would cause a runtime failure: + +- nested conditional callbacks; +- measurements; +- opening and closing operations of tracked frames (if tracking is set up). + +__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated + computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules + are treated as barriers. + +#### The conceptual algorithm #### + +Note: The tracer assumes that the preferred layer duration is _P_. + +1. The first encountered operation of duration _N_, where either _N > 0_ or the operation involves multiple qubits, is + added into layer _L(0, max(P,N))_. The value of _conditional fence_ variable on the tracer is set to 0. +1. When conditional callback is encountered, the layer _L(t,N)_ of the measurement that produced the result used in the + conditional callback, is looked up and the _conditional fence_ is set to _t + N_. At the end of the conditional callback + _conditional fence_ is reset to 0. 
(Effectively, no operations, conditioned on the result of a measurement, can happen + before or in the same layer as the measurement, even if they don't involve the measured qubits.) +1. Suppose, there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is a single-qubit _op_ of + duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations). + + - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional fence,Nf)_ until find a layer _L(t,Nt)_ + such that _Qubits(t,Nt)_ contains the qubit of _op_. + - Add _op_ into this layer. + - If no such layer is found, add _op_ to the list of pending operations on the qubit. + - At the end of the program still pending operations will be ignored. + +1. Suppose, there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is _op_ of duration _N > 0_ + or it involves more than one qubit. + + - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional fence,Nf)_ until find a layer _L(w,Nw)_ + such that _Qubits(w,Nw)_ contain some of _op_'s qubits. + - If _L(w,Nw)_ is found and _op_ can be added into it without increasing the layer's duration, add _op_ into + _L(w,Nw)_, otherwise set _L(w,Nw) = L(conditional fence,Nf)_. + - If _op_ hasn't been added to a layer, scan from [boundaries included] _L(w,Nw)_ to _L(k,Nk)_ until find + a layer _L(t,Nt)_ such that _N <= Nt_ (notice, that this layer cannot contain any qubits from _op_). + - If _L(t,Nt)_ is found, add _op_ into this layer. + - If _op_ hasn't been added to a layer, add _op_ into a new layer _L(k+Nk, max(P, N))_. + - Add the pending operations of all _op_'s qubits into the same layer and clear the pending lists of these qubits. + +## Special handling of SWAP ## + +The tracer will provide a way to handle SWAP as, effectively, renaming of the involved qubits. The users will have the + choice of using the special handling versus treating the gate as a standard counted intrinsic. 
+ +## Frame tracking ## + +A user might want to count differently operations that are applied in a different state. For example, if Hadamard gate + is applied to a qubit and then Rz gate, a user might want to count it as if Rz were executed instead. + The frame is closed when the state of the qubit is reset (in Hadamard's case, another Hadamard operator is applied to + the qubit). The user will be able to register the required frame tracking with the tracer via a C++ registration + callback. + +The descriptor of the frame will contain the following information and will be provided to the Tracer when initializing + it in C++. + +- openingOp: the operation id that opens the frame on the qubits this operation is applied to +- closingOp: the operation id that closes the frame on the qubits this operation is applied to +- vector of: { bitmask_ctls, bitmask_targets, operationIdOriginal, operationIdMapped } + +The closing operation will be ignored if the frame on the qubit hasn't been open. The bitmasks define which of the qubits + should be in an open frame to trigger the mapping. For non-controlled operations the first mask will be ignored. To + begin with, the tracer will support frame mapping for up to 8 control/target qubits. + +__TBD__: C++ definitions of the structure above + the interface to register frame tracking with the Tracer. + +## Output format ## + +The tracer will have options to output the estimates into command line or into a file, specified by the user. In both + cases the output will be in the same format: + +- column separator is configurable (the regex expressions below use comma as separator) +- the first column specifies the time _t_ of a layer _L(t, n)_ or of a barrier +- the second column contains the optional name of the layer or the barrier +- the remaining columns contain counts per operation in the layer (all zeros in case of a barrier) + +- The first row is a header row: `layer_id,name(,[0-9a-zA-Z]+)*`. 
The fragment `(,[0-9a-zA-Z]+)*` lists operation + names or their ids if the names weren't provided by the user. +- The following rows contain statistics per layer: `[0-9]+,[a-zA-Z]*(,([0-9]*))*`. +- The rows are sorted in order of increasing layer time. +- Zero counts for the statistics _can_ be replaced with empty string. + +The map of operation ids to names can be passed to the tracer's constructor as `std::unordered_map`. + The mapping can be partial, ids will be used in the output for unnamed operations. + +Example of valid output: + +```csv +layer_id,name,Y,Z,5 +0,,0,1,0 +1,,0,0,1 +2,b,0,0,0 +4,,0,1,0 +8,,1,0,0 +``` + +## Depth vs width optimizations ## + +TBD but lower priority. + +## List of `__quantum__qis__*` methods, supported by the Tracer ## + +| Signature | Description | +| :---------------------------------------------------- | :----------------------------------------------------------- | +| `void __quantum__qis__inject_barrier(i32 %id, i32 %duration)` | Function to insert a barrier. The first argument is the id of the barrier that can be used to map it to a user-friendly name in the output and the second argument specifies the duration of the barrier. See [Layering](#layering) section for details. | +| `void __quantum__qis__on_module_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) option to cache estimates for a module, executed multiple times. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately before the body code of a Q# `operation`. | +| `void __quantum__qis__on_module_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. 
For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately after the body code of a Q# `operation`. | +| `void __quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is duration to be assigned to the particular invocation of the operation. | +| `void __quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits.| +| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit and `%ctls` array of controls. | +| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits and `%ctls` array of controls. | +| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user can assign different operation ids for different measurement bases. | +| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user can assign different operation ids for different measurement bases. | +| `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. | +| TODO: handling of conditionals on measurement results | | + +_Note on operation ids_: The user is responsible for using operation ids in a consistent manner. 
Operations with the + same id will be counted by the tracer as the _same_ operation, even accross invocations with different number of target + qubits or when different functors are applied. + +_Note on mapping Q# intrinsics to the methods above_: Q# compiler will support Tracer as a special target and will let + the user to either choose some default mapping or specify their custom mapping. For example, see QIR-tracer tests in + this project (`tracer-target.qs` specifies the mapping). + +The Resource Tracer will reuse qir-rt library while implementing the qis methods specified above. diff --git a/src/QirRuntime/lib/Tracer/layering_example.png b/src/QirRuntime/lib/Tracer/layering_example.png new file mode 100644 index 0000000000000000000000000000000000000000..5713843d8598169f80720dffc2b9fb714ab61527 GIT binary patch literal 26209 zcmbrmcU03^*EWoUfQo>M(n3)YP^nS^p{gjL2&nW9Mx=%kq&F)?2#5%XH0ja_C3GSJ zp@V?506_?$cci!P1ZU>HpXXicTkBo#A7jQ$@;m43v&*%wePx0ksw>l-Vmn1aK|yy< z<<4UY3aXP76vxy~`~}_-VDoJSFUOo8E8nKb>tLS&e>rY>OYIf~1v-p&&zuVU{iLIc zzB2^{M*#VsV_~Ef3knL0jeB=)X}Oy$jJTOGT2Q`NNu|AtxCR*vwEOtz%zMF$ii3fo z=g%H@IyTOEB2eJb!!sWPjOVPv;T? 
zN$3)6vAc59T8O!a1FTFzu?In%CciGb72hDgejDw=kzXnNlp&Pl*FBE^#}Cl!=h<%; zt2UmMnMiFk-t{nxWuD{=jfg0ys0=?xD$Zc_=Jpu0CCJ%EK&iocqL$@V8}ZVc5*(HM zIvvY@MYU$r^a}JZw(hSW{jqTFnHE~z48uO&qg|PX7Z8gwhDw%xZa3$RRJi$Fzq@H~ zTq!$Pt{LFEFly*&OwdlMPthD=%Kxz$9n1u)6H*-f>}!Et|hrA}GH zION`Y=Xq6GXtht~uBnM{^Vlm7?oPWaX>;1BE!7O+;g5cpRyxSN!;(xZ7hXb{`)X6T zrY_NCFWRC++87J9hJ&qdood})Q1F$U8e)x5s_ss;Dj2-->GdhO-}Ae0KD9&%uSGXn zUzxnD5PLh%e5J)z;oJ(|46#7D-*p9+Y?1uN)eR9EF4l(F$1$^E)#BD`7J{!gQ&or3 z>FkVi9rdi9)(pd5wY`MVD*VXlZ)%`{PjGY>#l`fLTa7_A!(dz!_M}Nvs%Q0eMlW|l zE5l?v!F|s<05U{^+*>M%X_zfG!iypv?i!F;B&0eYqR^-{@fc zD|-%#kSW^g(vhu&*%p4+&Qy8})jmJ-?uuzhpOasA$!k?5*Xyx8cA`@Pn-jW~3)uLm z=TEx1%a>-vcXL_^D$aua4p$I!5{&m&B=Zs!(3bn(Pdd0}>WT{7d}HOfuaR7&w{1V9 z;+dn?n7uZ+cyb}DZ*hC3h4I>yK_kcEOs_%tUu`cvxIAqeJhTt*sAcz*{PCt2OZSGi zO(M-~uO8PuG|NwK72~<<*Lz)~qauX??-zF6=`aJuO?jJZ&rBKL`P4v2a7J7DYT1NX zu+UYG8pAw+Io%|!*yi1CW9zFkKZp5(sXjlL{PkgEDiOVFg@#Z^(UzBX7Csmq*su{I zsHo@Y$_Oy?O0a{7VOS_fe#ch3gLZZ4e!YTF+T7wB zjwN|BAHs{j5E+TZhjmW^CVOXEO74*Z@7tyn$Ekk58EsjqQO31-ESEJ*AzSg_#fxE% znreZmJ-(SAf%w|VUr5S+{>m)-@w`hC@7xk>h0_Exc;(L7%V|Iho+gr9M}yR*r3zg! 
zc8?TYy;?~zy^ae?NSo?sq;B2zB7J6~dyq&7|`Oo~z37iUehKy83A{$90wn*Y&m@pK2 z(s1153PoeoBcMpyNc6>Q_U3(Uf}7Q$*GLc?+s+wD$W{plPuQtB`4)BD}i znP5-59eL49=EUdn%H}-+v8S#A*}t_SfuG5Y~exMdc~i4%+la~+w>$SH?v5^Ghp0+^=$Y%i6p5G* zMNLdNuoddXHw!rR!$rMqHM@f}W&;AbXq+8E{MuAee`@KS$@;Gi=~}uqSIueg(Mw_JoHPn~Ys{5k$0FYUr+aBlW(Hk5*e+m*xu^P8-5Co5|o)lIPH zo^z|x9Ng&?q0#l^?^z(ai|{*FWxD)x#R|#=^>Q1^Y1}l@7i`@&W4+IhRlhrH2yP zCu6B2OE69kXLKjnQRC3l4}8D8Xkv;;5zBv3##J#zcjD@>Z6=2Hl1B)?wRFT2;Y3*R_HLN8?ukyFGJ7m*W&G}R{%r5e=YQ>f_Ed9p4AjyVw*D?I zoKP|oW3)b7;Jqe0c zd%PFfLa2>|G#_i2>pixPm|sOrJb+{y#OwNU4t((}$`qnyPH;^Nn~gCgab$~}ZT(KmZJVc>rF=AuW|MTbi7=O~;xwvB&K>gQ6C^VKw7=)blQxQ)>)(e=}C$$c{P zwbC8i8-uCD~;|fD1Bhm(YH?x5zpywv$lfkI$Vjg)jSwfs+)b#aBlzJ{)8ozM%QDBAwVPk z2k6Ja!*~~qCC92tJs9& zA(ykzsLJ*J^tggY1=7UMr9JD(-&}5kyXolzONk-HHBNS-hS>fG9a@U(v(MZAiT{uO zuSk2kg0yl6BWf_~3)soGA6`!Wa4oWLVr8w*_f5KEw%Qo9fbDd(Fvy<2eBqU?xBJbG zN7H!!O&jLSopB9T`BRX!p6n3?h4+J4Zl|U9v$Zi;)Iy0-Mfl^MGO(>t+~3AmF z>^pO}>aZ|N>G>Mz49!J~2v{RrEUyKY**vAQo31llb22O{^M+YweAsthJ0G#Eh2Iab zrhL9G*zuQ>M)6Y_dCHWa=2ocg{^PxwMzP3l-t1mhY_YDn;ofJE_ga@@#vw&`9zSAf zGkf2Lt(U?%{r?fR7DR60%)%0pxpZPP*hiRpl zG@tRgN@j1_r}f3_8#QyhGe2J?JX$v~wcb=s66hjw){Ne=GEL~a9st`qaC3cXa>t~( z0n&Vb-_xZy@*t%KWn2@km8S+1v_MNUZqf%8%41+6opr>G48P?!2nX##6R(b^^bXs- zgW_|FgLp0Me-cYV{epuo=I!m0TCe%AM2GqPLLc%Q+(=MQE*(RI!f=#-&r+BAaKd0#Z{rvreNv8{WY9M6p{v9%% z1y|}D2qER(o`;2Qb9b*K9%p;_l~}A8?NEPTGT#*D^>p{0gEDrx$;Z4o<0W;ZuZ2&= zitp;K*)ghwgBw1Z=~3>E-?=>~!67iv^?RvNJ&Oy`YK>)M`MX7F-wX!DgDJ($5p zO4xU9DW5vdE3BGcfBPV6a0nOGlc*3k!j2!$UTYyB*Q6v7R|MxI)^e}tog+598ZC3Z z%e3lC;y9O<|} zC)+&5V@NHSAY*3EH<)*B|j^ z8O6>Vu0NmO>u5Hmp9_Q;_6Uo%znF8Dxl27^B`a?*2>HZ0m5xe9R6oN+zIh|)Ku8o5 z7q~3ymQc05O2YD+)>L^Ql_yUz&S(9XFTNo{{IzYB3_*m}c zG_>jpn@U+MGe%zis}1%}!q=rXJfy$OwE%h{x1)_<yzb&EZH6Hb-LP zCl1EFtI(UIT{MZ1-t>g;jgLeZZplPv6x%AOwNeGo2kY-WPx@An&ZsfF@1NSQSrab) z!;PQSoco$lyiXc(1-KPTMB4X7x2v{%|Ay2&l~P3oxD zV6XGELVdQ)E-*#Dph^X+-&+ELrjVx^kAJ5sBS|@O-eGkSSf8kD=!Y7^W)Vp9nS2l3 zJT*KE_;8+v!Pc6>4ga_e1))Ke9eW;b 
z#FZt#_R8mfR(@d|M1zBd2<&am7n=^wzKI=@w;HzlEg#>ZuQni7X#W(Q!GzymG{GkA zDs=;MAh|)Mlps@Ko@8T!$b^g6tPJR4v$G!JiU07JDUKOb_!TUvL)#MQ?aFZ)2D9B~5uiAUNFwS3>P2PK}Ek=xV zJZ`O1;qYgsCf}t?x6~o5M|{RKe7ru0H)5uN+4N)2viwv>JS{D4_LTldl;*j(!byMg)g!7Sev{Mf9Cx8=fA!kgE5;-##knt9a3@M9C0HFSh>s(px?hg zgBIn!d|Ak(>gp65)1S4>etmn1nLhVf(m`nId+gr!RN}LuK@6(UveS2U;+_Yk_{@)_ z*9ePKK0Cj|o#}#)*qJy9+dhGq6K>Uq2fM3K_W;cd^%1Y7YUJ9LY@J+uB){%N`$Oyc zz_Um>f`moOd5_o|M|9E)&1W5PtY^+#+`Sm-M(|K3P$n>Oa2RK4T`M<#oM})pdw8&^ z;K6rwV=REHMdH)DLAwN5kHV^eV}HJ_6u`jcJ08UMly#uz>!TDwLrjxb9pe)?Tp0y; z6J4S5d!kphzWzwP7hykA!E^W*J01;JE_&8JQBjoPv%9goxfHz#J%1D?52uF9or9SK zXS-6A&CI@>e)aUyrAx1A7&;>O!s2S?(2bn2a$XxN#75F+z})`s5*7lrW0ChRQ!y=P zU7Q_2!y|arChCpT?nko8xX$S|CN6M$s3Jpf<1Rq<{p^*-YSY>hfQmO*}}E{ic3C%fD(h(;p00y5+mOmY=Hf zzdEplJ@g$YFdy&j#`^h{4+~dcF-aRQ!KbUmhG0z#nj`u9fn^%F1G#YQFl1A?^#y*N z2j@$+2G1(oT6J2g5mne*u4@QpHZ60k#jtQ5F^%6ueD~M;<$ZRv@{9%-r@Z7;>dDIp zMXz?mD+bV)*bP1=OiJ93783lhRJl~MW|j~y5QK~Uf zUrE$-Y)!i!5%(jn3mO!w2&GlV!JN|_Mr&fKODps8QicgAj#Hh9 ze!ClpZO6sCNBmIs{{H4@d#q$M+;ZtXaGMI3xta~Ut-|4+bJS98o+;MkjXwEcNzK?L zj^yNI*Tvy$(cO`ELaMhXV`t<)%u)mYTIfn?j5hU+Aw_=GNR;0PDZp)}yCLSuPu|Oy zpR|3tfrJ@306Qcrkesv6GBIr|*D+vs7b|W1a>BzRPd>u5MhP+wnqqzT+tiC)b8LFE zhDsfx8K#tuny@Ne+)`@E$rC5sf4!vH*;tTyQXl9<#Hl^6z8AsU4s1lI@)*J(dR*1u zkGi}z7vmND4uHD}zEYFV<418R`2PI*Xn%JzUprezMJ3S4E%T0&(&7MGQ7F+iP=u-d zTB2}n^4(zhgDqm6KOIEIHOJ`5l=n&lGdMLlhF3|g!lpF>(Kpl9raFc73nzvgu;6yH z+;49GQ2!)i{=JdQZw~}&F=7aF^W|TwT#0?(UQ&{JD!boZ;GaM6zkNPudH{qE;`ek{ zqR&Q*(P9uIpGJa!N%r`KO0P|Ogr!Vy;`x=o7!@SOEtO?Idk+UOHWUTwrf=!?XKFSs z#%`QFdp1_mAu4QK=#M>q0tXfIO z3OS+D7|KDCR%1GQ)@gp=hDTA}o#mz}6`ijSlV*A{yf!>lO>Q3_+gc&^_{^>TNKz!V zdxASNtPol3&5kbJ_q<(oqGz9%M?heHd8}^rS%1nA!-FFD?jm3})j<}J{CrzVXY@AOrABoyKs$}ZkAA8$?VIY(?th? 
zwqZPJFI*Rg<4(hL6)vwyKc6=9I&~Ij*%22mc`*{Cd;F@@#fx7oI}@0NO+HFa^If}U zJzVA#9v+TC+(6*PJzkfx$$1td9yNT90WK3a=Lv4`n;#vAv=+PD=F+He#xeJp`uciM zCv-C(lgP!t=u7sq_RrP(8$-av?|Xy_>_?>0RFZZB7c;_ZLCu-#&EA|!^lP#1-x@tU z=+8IBf?CyVy#x-G4ZMCo(eEI<$NmkQl#F>mP6|DMK|ZvyBZ6`*OViMoZz`d)fmR*AR6=e z_AzuaFK~MDw_^iEg!Buqn8&5%8kW+ticUbLLO5h}WSdHC`zK!irOvOJewzDn?3XVW z(bp$Dk9`T+fC@cM4UBuC2=GZ7>ex40a`812UxyRJDM#Tx8Medbq#Yh{Lt$H=RP9Ub+Lx_=4L1F_(`Og6>p#tbwO0{dp?k z*=UW=_S%%gaM=xyoHPG^w&%;O*MCt`Hp8ob;DkV3d%=vabaSZj-sbe(GGEm@`S1F< zx~4wb6Dxb$Yb5cwU$zK#znR}}V^=#(UICo7^zlO@09g6JLQe;AQEwpqLZ&sY+_jf2 z2W~+W-AwSf1eUOF+<_fHa1&@D?@Sb&q0?)-wrUU(rm*nq+q^GuSNB|y1m8E_e~QPI z5{2fC<@{#LZX`bVywcvU$>+V=T(7TsD8=YyNJwBqoF4Hohj`h@-iPd#AETr`PDLfC zS8!8Zk@0n@)8jo<_LRoFKGyMjtnqB*X8K=`B`bJ7dGi;;;w`747^6== z1(Y$%dpulLKMuCTcsKJo1U62B?ca>&kZ}zpDuZl85*ID#!Ii@4g%0cn-X|prNjq}4 zSJs!R=f{cLH7#G8>)>sHog*JY5#rVC{t@c`FhiYVMRW_Gg9&jh(XCxvrtwae2NYcN z7xgDko`95BWZ8KD&|Ry3(?vzdXbpO47p)j@niXoX-|$;`Wa!Xt_u1C_OWfE&SDtpY z;v3JWe5SO#?0|vbAd8hFNj6={cW>W*6-O2Mw>#Pfi7Wy%ucyaiKCia}4rZia@#(}! 
zQlg;3wa#?)2Bh!8Q>VEC;eKNkpF3nj;H9V#^HXSP$3LwO zJ9n-x47p9V#W*=Rty1@^o+N_A)}-Hbrvf~DM3$)7Rj%l-Hu`bJxxsC*ejyHC?J+PR zG4*)lK5&s@r&CxPqH44did8Hfpgz!Q?13Z%iaC$NvMxZbXUvB|;@R4AhtYoi8hz_>7lgR2g%JZEv>U)%wlItl0LE^&JNyRg%rw{%5vsK<9; zV6yM!Tkc#*hQp1p*u#+re^x)VhNB^94$(Kiewh^w#(_%Qo2}a#yB{VPVu-Q3$08H< zC-COa#E zf9Ni0%a3^?`N)WG6N^^>9*p(=qM|tAq@vOU`XN&MUNb1}w^-%^|EwY^?RQXRWj0j6 zoAMXcsX*^Yw_Za4>=roS-}TCWFF_8gk;w}_#F*H8i*8(BY=F1aZs%bQYcSM*5#*JF zA2`4Lb=}oV-|52}(^R9QHrF7RL!3!1cmK1%xw=7+_=Y~=^6myRHCN6edb700S7Ie> zc{7r3?$|mtEFJoOEb$3Y8(CJ6p&9XW>tMC7hHbmT!l3@NKXBLfl2A0KSGXRdTnGQktT&?3%D zqL=8|!RaF0))|0Tlx>|oK}Uz^Nk_$zDko@18Xh1dGL6cSFaM&NfV-a*wkOhXy5G!` zqPSTI_K#N*bB$@A6t!q+s;uxo0bjXu;kwrQ9>=e``6kvwTk73|qv@xao%R~C#AdMd z*ADVcYxbnJ4U9JMWuVnuy_J**u%m|uq?M}IaB;ZY8V)gpXh{B5zVb6TB&7Jy-axbQ z(QO@)Yfy47o5yQoUM&_HS>*lEJE@XoN~@B6N{jNQ5`X}%jy7>D<885$8iZbp`VM{B zuE4Y06XK%L2XQvp^|Xf1P^{8Ve@Yq(r^zq!#+k@0O^y17Cn_h@%w)xN!g?K#b!4iK zF-&P;x5k4*J2MZ~`%Ov4`#}SleZby&=qI3pMi+g#fS5;uZ1(i`=BOXd@#6zZVyr=O z8D2H{$nMm86Xh9cqa@w@kz`wv}(|}W^0^wWqtxpNsk{09l9}8bMWx#@{bkO6ipxMs6RpG z+l~7Uiszlv8F5A&XU?4AkufO-s;fk=L~eKAXm{ zvCTUy{aM{~I#F>r|f?r8|HPp=lB2UV^Mcg+g!$NRku@B(jXnGXOwnGmoVmqFOUB+>T*T}*%f zm`<6&T6d)!t)Pq_V8U^EFTadaniMTwS(r0?Mqh+k`4Ndir%CK00 zcb^Jw4WI6q!w)zUAK?ca&f=s+-64kOh{Pujgn#L0>UTc3ZzkZD4y)$s-)7&CgcHZl z$sRI^0;Hyo^n-MMy75{>M~JwKixRWtr8{)r5%x>i-o#R4j1;!knCk+3Wo_3yWK3(x zu0`m=ncUz^L+xumc<=zg!xQjomP!;Orpm*w<3~YY)3C?tWNY+cp7-|ZWHUz$zpQxi z?_k>rW4Qn*^$UUWauk5{fL9VBgI83}KqZszaOUW<&|U97VeFEuySVhuz)KEWW?u=< ztpQdnXIb1BcT+R)gbseZUbDNn38CM`3a;?+98NiuJ-p}_`)jO&-EnffHpe5XfaBeG zW!Ie=6zBgJh|_}MEw4_0Qt(v!!DQ^<8ndwR=2TGj$yHt%@&k9}9p-Noqq%Gb3)2Rs zK|34S$t5WY7GZVlU*X*>2ZDfQ?WKPKBp=#2G%)athd`(;k}2-yA6Tl7JS-_| zf@@pB-T992MJdK^@HXe%+dpNnaN+>UR4?iF;y86#`opU2_x-(IX&Kym{3;O;@Qe$$J8 zuzCUM2A{6X>wgxng))&@2;4`|KScHX=VNDP`*QhBsDXtddLG{Mf&{M<=uZV+06fPF zIHw&EZ~)9#AA;OK4Q%kS=>1D8czDSuO!+h?uwV9r7|{?F#P{#t!98x`AP&c=sXG$n z4Am9oPztyCb#kWvYS76y5hOP-oEjzM%y|ON6UrfjdB}%GqgQVo%cusl39{hrQ)KlP 
z4PL55a@0FQ#5KWh-}5BJuXx#R91R1ffL?*w{?7VIIE4s1UaG;+q58s^GdIZ~hMK}X zVRuN1Cj@i?mn@G_guI4gJa!jK@3Q!TKap(eWacPgY7#2{eEMP1?YFbN*~%=GFK%_= zzRN6CDUtyM`4KWhdT*SptgHa9cmgaUgxA|!Q{(LcwAE41K}jLo3492U!?GT$9|a9J zz+;e16yL0@h>uc~f}jWE!NUK3uzgA^SQaq7?pj*sJx)_l--Xa#4r_n52I$eFwut<& z{r>U$&F~s~W{QwND25e=yyB6=4B+uEk3|PKx!ngnHFwytZ-D^*a4UtJ@c`>Sh0*0& z!AAkln5Hb(_ypZVa*T#KPc}`tRsve5`RxnQYR~l#a~%t=O9VmYzX*-o2!VmsGQc<5 ztVzkso75D@zWhYA!mVq7A>*63LG=cEA1P zNwN|BCIH4^cR2mFNzq7yw56I|^#jV`#Rv`gaN02{@?K_tUW;hf0&pl}BpaA_++zf* z$s+4s;BHcW#{Nn|v2qwE^3sF_j;MO-*E%Kzm0*Quu1@-I-{Q)jfDqrmCg&(Vf7p9NP4@=~uiP z;jG*HO;_%f)K2-`9HSGa(fW`?n6pw&0N{^WBFBX=XVtyEtBmMNA%H)gl;xCu{x!n{ zJyPiw{b%z}ZHXWdo2}jStK8ACz(B2I($ zdgL{<4}`HX2tPm~my$u(>crXC#xT`mc$VdJ#ZETG=wGD>0UQhz`RTsgSpP7|ZXC22 z?U4NMcN1*jXK?_00VJEBCBkF{|EBr_m)V7T<%dBFhTOHZsa#}0*eODGdGP~sR&Y8i z_y8#!s(unq*KZ9PeBoP=3?&Gy?*Oi&3JyLt^aEED1a)P=>rvuaT+6X2;`sPy30nYB z6F`D&*0%xNK=c=sW(tbR&z4Ml1d1$zUI4)SxHQiHz0M@mRWyX^KS$9;RDo*xdae zcqy7^Y^R(U5SCEE5^-_r>yt95dX>}4L^yXw9?o{xks6Wn5&)Aj-}PtDAdTmoNm<|f zuOa65!5Sd_G3EyVCbPv_EzXMg=F?i2vW8JeZ}Dgn>gDs|5_AM5XFvFpn-Q<~a0n-j zl92s@yDZQkb^vHwB>p6RXdn{~TKQ6-ez+HnQG*Or7>pJCX!0(1J|08q%;ft;KR-N} z0oNpR^~opDLjM1WL9-OEfQ@EBLw9Rb6XNbBb9ybl7G@SWM9d}BHHL8vSG%+)Tc^;t z*cxF}>bht??k9YNZur{EXY7EUn&H$bHW9NIaXK1`5W~EMm|gp+ipUn7K$~U9l+PDb zqp$CpRld}O8f&f1`Qlkrlr120V!yhuC>OSvAGlX7&_3Iov6oPc4^5JPzCn|U4`dGc z05CjE*N3G3r?Rx)`TtOsI^Y+hB;ds>?IVnM?c9;Io=CnYq&OL;MnBIIojeL>GeQ8t zz{zAqxCS_!R*BPmRbyTIxI|<)tk@`Qu<>I}w-tP8>cG~C%zsR*kO|@9`!+*TD_g^s zwm;pG^I43C3y+BEd2AzSyS&SS@&r%gjuyZ@q^6{!Tp-KS!?+#P11tC~a43ns<+dpl z!UHtF@_cK0;2oEvbJ*YqkQVybQ)&1I1ZF-fIF)s8C2H{AoUxXWs1?8&VCSD zKy~-WHO}It4#JX3T3zDZpu}wGwVgTow~C8Ci_!coCSAoh-iMAu`<%a}*h1!DqDd$! 
zIlNw-UY!_1AW+tKYb+qX4X%yI8NGpm`Wt?=BGwx#+MHjK-Fy|VTR{4ms{Fx?=_Svu zfbR3GaL|qT-c3yIqck&n85KdF0sB#(?}JV)yE{*3i;_y6KXV6S`vrqqyR&}q{HKiY zrf-&WV`JsI=XUkV8dQn@py9w6?0XG>jLTsS-*$R+O(T2WW!ez=ouw~)UB*A#wN&C= zI4pRkqPN!E;y*#&c{jAr|F7KVA$s32)|`!D!2&Q^e&tu zEeR1A^WEC}>_x-FaUf zGtWkhR?xjt1{7ZJHe|@#{0{AN``_D)0NYG?OvIrirqJ)8eQTeTAa#NtOMwq|oW*b; zJD5xpR1YBuZ`5A5Q0Y6+wW#05A#P%knPKW`6zDV6Seqj}Fo8N3r^|N>d2n?TzSq8K1DOZTW(qd^VC+UPF zts;_X7k}VxN}n3i8$A}5fXJC^dIM$JD?(8@nPq_@3LxVKptvosr6;4-|FI-HcGhFg z(yl(~a!x}ut31?fpT~ApSm5>I9QIbN>M%?Hcm+JDk~3ng^6Uq7@5c&v*d^L)FN_Xn z%{RtC`(A-kl7KkED!NU~rrwt_Ec-q_2S7hVNZ{=t*;m=SW;07_oPKJFV~-Eefu6ib#?E}aYW8Z zkmO|ta4E`R(Af5@?|NWs&5Be*Uu3<4lnKV(;=N1w{5-GqmvMKc+ojXyi{5}6PU!9g zKKQGv61C7JPTty~9jJDf?ZHGW`%(y%M51Kh%FzXD6{ER>WC)pd`+`}pvTnnnk9*&{ z3lMS7ZPYDSG&#>~|1z}IGWzfyxS1-1FHV1dCfHco)oL`$DVOH@U5Po^QvxT+l;PqJ z9HCPaks}yFrH%0Bo7pOcQbo`Zo`kWz&qnq;K|1(G?#UMIjZnW0E@iJ&+h)%023LH4 zXy9xwC>y0{T=+?ZfR!ygqmeCvyi?GWUe` z)$4`*okmAQ+&MD(5ftsBqbRDTYZXMd=9W zmiiECo;rqhbMP%Z@}|3oAK3LsMK!2Cdb0w?(WGLwS^U6G%ZPZoOS4@wm)rTa+P5~) zTy6QRxyHGUYx@hHqU3rI0M$|WB z5$YZ<-H*G+$6MOQagqK!SumR{;)*~F7nm!SvoBsoW&e@Hz-fZn$gL1oY zqf2Km$W$M$`}b@JUw@yelWPE2C>5dOA5Stc=yu^~6Pk<*NK-S|V9{p_troA_Ya1#D zcWvan><|L<$z*k859|W(4q*JdwfXRiSbPqM>0+52Y;x%tfcF z^4s5=s()ou8pLlU1#o_7J$7YxB0^*MFxXmq;#I|qK|9wxRLN;~JA@G!dX=AH{do1! z?TL3)rH`dU9+2UUsOZTxV~n2}Xs#MOG5do`Imd=LAMfdUK<32z-Xr2XSp=AMbyt&? 
zvJ6Y@K?f1a4r6C$o)|4wCUAQQ_>B5&Hv0vMdb0N{7K4G4S?w$2B~iYm1I%8z4(jUcaX=MN_IkLA#%N#l@k&EXL>|vtuEugbrA4I8BuqSWYY71WdJBRtaoY z95G^4^g;i<)tVL+AZoa_V%mVn>DN=lIDzS?-JI!r+lRnx5prv7VgTC)fnNIX zDXf)qTXG?MbGknt4D$dP6n=P)=Qk{U#2WCMv|poEMecxURZC;hf!N|dfSbLYgCG?-~qmivKc#I?sp+S zfULf-R?|w*W=SC{1(DSz4x+~+8GJi+CBa^z@c{a`_ zmy*-XC|I?p>5X$1RpgYG<$aP~9p#i37$s}T636NSNX3jl<27@KZlF3rm5H+@&S9~J z+yWwneRHj1-iM}4A;UN~f#_$Lc8mQ?`lkM870Esq5jk(=LCthi(OR|8yo~p8zT4sa z0sy;3J0ckD2czMjG(-s+@`5N#M@6W2_$IrnjBSX8rV(Jg3A+msLXpl?R~UHzT9t*a zr+(N6WK}8=Kme}gvNTjl2pJaTLv>(%%QqohQjTB1(4CG=yv##$yD@p;KWUs1%)*vi z``#*^QV_Lq#yJSIen=YH3gy{}*l9Mkx1!HmC2%3r6^qf-Hh8JA*=<$Hg&O*ZP(LGK z4>*;=GJ#7UOCW=|Nn5A1i4~v%APGc%EJc&PSMe>KZ0Ygbv6c~@9H<+WGZno2tztBD z=RlomSDl=AY#}ldznnB6wk%{IS^@1tD5kjT(ka8@qhm5q?7!^w#1g9nOIuzgr)b za(&(a*u~`8FWUVRRfl@&VGit1Ms#jjP}?=_%t4RP=bRRhu1g0I4Ul9Uk|R zDsgAbKQ26hG_Fy-NGBSr$maLVAvK(A;*?BOJje3%Ag&^mY+{fYr2Cd1Kxn}XF-0e!UiB^Q;CaL;!Q&k{cPIL0V>L!lbKzeaiLRsRtAOgfFX`|{yp zmienDIAM{H-ArZQxm;dN$Dbmw&P$;8N<*;Al*08^-7glnW3^f7#qI?t_hI&hN;jIg z_5D)c0(arursoT9?37&#X(9qLzDfVxHGAS#@BTX)>V7V-%WKS8r1|Lfy0+R!iklYI zYpI~nix;EUC^<*)dQ0Rwb=3iG+{ngh3%@ntbg;J_-waPfx-L}$JNP{yP-A88GICmhOT=Me|yr(iIz-SJq- z1CoD1xbK&A3OilSIbSY4-mA*lnbD+TN;1~NL;Ebgr5Hly6}O#y|1$9qk@86aUDvJuVTQZlC5zX_WIm%!WkHpRcM$X$obX`7|}=wA&+W z@f}loT_3wMdZfA5hxL%n-!P?R8rOyKE_DbqE#0)aFAYt%4;*|-pSRbi!$W2Xc0ICC zRN;-|G=p}~?3N=3QdmF-zG&Jg5^833HHsa&=4O@RMDI(N2u97_k9u*Qbnee>;nm8# z#po~NaW-Vnx! 
zr)^gvUnjmebl;ysMbL+4MrS{rYL@A+9B$@0SOGu6E0Q{Rqvg5aW)Uq$Mi0J%d2`s` zHHTvw^aC)?285ro9lx_1CQw*mj~&5eV*UCdTwxYIJE*LW<<7ho@sr?(-rHhB$cM7_E-gg2Pkf?E2{ z;*9bsnCgv&{_*b3zrmT)aKcL1`4bQ1P5Cr$rI~K4bqTkHI11*;5o4Jv^B2BKWb_;a z+b?nsi95V#X4hH{cdCp$C?q!p9*|KWLLHtvpz&-QVCJ^GqAI<%M!PvBLNke29!nRO zl)4G=QeeQW01*t3UV_O%#QQH+%9bg~s3kq2d{6G{x{h6qIQtIAZj&~`jE3%2t=*CJ zQ~4iNA(S&$5pZJ~*$R_j)lPCie6@yi99_#<9N^emq6A|LDK$O8ar^&t`~)0&O@`0U zIe~l9P7N!SKYhXe1(eC2^V1O4ylap2S#GyuOFuX|-=xN|5q3I%!eyn zY_-kFyQ)Q645eDyPIT&j#S!yQvns3wkXrq=o_$w-T_LYkXracjDoVX;qg zzb(9Gh2443s2nTdRZQ&3_m=V@nO~3@dHScV432*a1XcA6 zg?JGrEeyIH1-Gr?P?$$eRhHVWG^KDPcv?^Esxla9ZdJMjk%PfYP{`S=EIe4OYh&mH}&1V+woY#7&0{Nc+|@mqrHCM*uamWILP>XA2I)ch4wRE zkA|EVI*ZXlARntj=4t^G`hBZ>D#00$PJ)OW8jywoWB(7oG>ik))s6SR`6w`@`d$Vk zc^v`hoBx~nDnhvd7Ttv5$v_tDe|~2e$U;{_fQ&G_2k_4S2aCllWOOFK?i|nkIJxZU z-P;Sv|4n8A)f{8z)j}P08Nj_Hp?nw-^PIrU#W&`WYp~wSVf+iwDqFR(0Nn&WifU^>6b^~)99Rpb%XOyM66G}8{) zeI500rS5=52IKE{wGYV(X>p2ga~6HL4xnrfkYc~1tlVmyVhu)KdIw-O+F}i6pp+tD zSQ!AgXR;&%du+{bpn)qnF@r0B^*rZM25}km~g__Js_ZG{O2K(r7J+xq6xYtuvADel!?q;{rdGQ z+QjpodL0Z5@#?;yd_EmbR!VGoPhGfv2Mk`Ex&Upf#>;4)!;NQoVg3Ur-v%aq9&68T zeEMo!iP^=5a8P_<#+LyJ?zkhyyp9@1mq9*Nog<5Ka-aNo_!5gPdqu`KhT9-uR-cH%DJ{EkRjqNGw6x z<-XAIV$YehXmN*{z`#J@wM{1@>mxc66-UE$?*1>+5%tOU}5XfT^3@0TSH z)t{pe7mXn+$V*4~H4O z=r>4<-;6oE=5MT2ktd$C$8rsdt;RAb$#QYh?Np1LTK^^@n2x=ZB>>rI92MUW^4Y&2 zyw_*7VGBY)RO18`2NQ1~K~SL6td4U`0Ua0kLE=kUYVVt7X1~ZHxi^*bgGB+*zPDCH z%S0a?^|D7m3*rrE`S3;#>E{>2T!6?C_>>-iGv#D_HQ^M-Dc1!5_QeXWn1XU~n(GsE z+M{D&IMOlv^T|j6B;PlZug_lzGRMOc20EQ9mbc{oT;i!NoDGno8D;?)36kxF(~qN1 zsZ2NlnxmfI8R)%`Oj;n_u>k)43?iow3=`|4kcZ1`WHljq(eoS}KAm!#S~+@M`;-ic z|Jh+APyn}c!)+-lzM$wp*$+MecbW!`rZjnmf8;=5?Uw3;ow+Aw`y+0n&DIo3iVz4J zA}2V4PvafY7q}j%L6b>xLvh28DR*8BO%b}yjnTMLohwPTwSzJ*KKOQ#PU>#fjDu0A^>p>wPG zPt^?}jJOvLKl*U$i_fg#)nBdP#tiK8J-9b$z>ib-k8vT`*BVpvA}@1acRBQ~8$1pZ zo*7TS5vJ`sQS`dS*I3A(nn1usrx!QbGx0axa5h7mVC$Q(A7FHdy~$XIcPqbv%p2sG zy2qn9>G}UGQPYLXP>;V3grFdvPLjovz{?o$QqP8_)2Ut;>iT*#q#Y8fRiqCPVY_Q@ 
zjvtTYD4tmq>A1AyKzxJBkD@E2gNs_y3-vm$P*E+7e?<@hNf8|2(3=$*M{V}>`jB4Y zhyWso8G!MnmvxeOkQum`4y{ZAq{&X;pAXu>mhPHOs#rx%ciS~J82?}iYZ%XAmh45h z{<}`d1VzFqPB&d4suaiXtm80@ao!~O>@uX5`i(TDziq*m-5&hhvs{B-IA;|1kZy}Y z0+BOh9|fFa723D?Z4832lS%r#iE=qgo?H&ZPZ##@)5Nxy8~D2LF6385Bv1X$G7-^D z`kcS_bs~BUbdGNX^L1_IFO4XyDctK48;Z`_!1?EByesP3@PMzfEYn@b>tT;h1X_u; zmvxr%4tBego`y4gE`$;S*~Yl zu=OjD(^0onGlbA$9H5i5alib=g}_f`JbIK6K$?KrY@^#&F}djknJbS?L#-#5pD0S*iI$M$h$9#L@ql>Sx5F1-XkNC~YdryA;46mNpPn7IwE^JlTcDyL_~98%snsJD!K>Vb-W zU>4jAw>UWx!Gi`r_(Vg4bfE zR^~a69Pmr8ZzA5MC`Pv$Qy#tH-t*mCJ);?$R=fBQJ#IACPRiK1N6;(N3Cc={nyl$l zH(uQsl5#ryL>(SU7 zpzb8dpf-ob0!jQ8zK}0~Iv+oi_60@M{eA766-P0Za*5Tb^>9-0l1cqu zAJ)47R?tKoO?bQ!462alOu6byP_u_Ii}{?OpgzmO z0+5O_%=|cL&d$?>TD=^!9+fN!kALJ$fmD5g?ifLiHM*`M*sSB5U z0ieS!FkR0tK@m%9v|&Xm%_e3M9_ZUI(UZ3vb1~>*3c<^hZ$RY|#gNo|;t~?yd_HB>6C~Hxuv9$aoC0jMf;I&so)SD3zLzYYom3?`$C0kLJ2=A+G36m|7 zt&FS@Y0PBF*q4l5*6(^ozwi71o1>17qnYP^?&p54`&!QXJg?Yzaq+UpToXH3SXelz zxY6Fc*HM70$O`Sa7CzEJB^!z=eKekf1N%6x$MHXdqQ(J-c-H;skk;|5dBXhsUx56r zd}7}(#Z!_qqrF4%?(zWO-4L#Vz6a}HEJFxErHoQ5TDiw}zTWwVv{vianJK2Ohd&AAE3~jwdwZR}x;Wu=;r&64rdlaQyel0XTdPT)!fbr5jS3p2O<<8h{LBUcYU00dw8Z)Eupeb2@ z2vpGM)-b&mD#^}%aIN&VkBf;>L#C^LE?#nGd>F(2$I#Q+?~*7@@Zg!X=Ew;H8xXp zC9N-=Fp1vh98{VOK+yNpXcFKT#8tfY&B=OvLt1xtjuXujI1@J4L5L9{$L<^W2Gi}O z2t}KYIa(!PfDwfO7Qrh|QAAN?_`#7m^B(|qCoxK#V7{w=l1^wyNu zzf2Q`upE(J$)t$1H$2#ock$CdfOT(z~MUo9l+v5<61Iph62ii$~=r%On@9K312pkSk?r>BGN%j|ca%oxVN zO>BJWmx`pa2b1@!tJndRF#N8zkcl6|kPVN)m&~@#oYhIJcjA=(;^S4Bk`-IZgjaz< zGbD{-e^DHDx0m_CyU1rnkQdH2htp%O)i|cgeU#cH)bvMkkhrIZUWIY;@It~#u&J7V zd#g?F+2M|q5WmdEOiEaZO9G_$^)>+kpIV=tX~fsG?=2&xGF|C4DlUeZ0TsIb(0trb zP&xN7CG;xu4zrjle0k;6aMM35j~CUx$4{J);x}q>4lqhr&YO63LiR-E;Dg6hCF1gc zQ|TeYUCU*p8_jfZFOZRInmdd{idz8*LL*+1xf}VYlqNT?Z*@jiGd;5% zH)F6=QbV9|kz%TeSI!XXs?H~a)G1QV^rV8iLDxWVA@>nHlD~R}{IYEf;@a92#7CBxZ zuwWtAMBodx9x^De(B4X@KFD3C))LbWRT#+ z{a&+Tk|z8KmLFi<&aLI|PF$(O2_f$)J!;>-QBMm)h(Oxi1+SO+gXi{KbP#8P80T+f z$&t&6@xWgL4z&(wwvba^l&}E7*`b&e2Y}$wI?qp!4!Ip@`{%#;{8Zvjy2;B6;eSDa 
zrxu98oTgItcV-z06=>CM8zJ+yqO@}NEQ$;yocia^J%S+xnM0m}O3G{Y>^juX+YCB6 z?x0elt*x!`GQZKax31O*k&KGNmRRgmnUe} z{?JT->kc2jk7Y-_fI=BW5Fqlcw~@VGai#4(bBCmZP{LfS%&{{RezQ|o#Q6dCC#d&u za{x*-nr$0;vAl&foL!e+kFW~>0~6L7wb}GGC<^N5;nw6bcurtm z{%=9X-3!VYAOk{h1Ab-TjrLkHXt99E(w}X7q>i$G+iw0TKu)0K1Pfk)$u^sT0`TU? z+R&YJkaGZw?UCF);NT$W!Pv3kHxj|}sDmLt*wGxkB84Hdz&`^SJg`a0$Uz@S38RV8 zp1~6O@t^cg@1zQQABtqDIR@YX$MsNE5E(p;8GA%xN0-XoZck^i&f}sA%cTMz+$cqf znCu?Py?+k!h-T)M6}zolmjB!lXx#E##RCzpt^E5HUS2~KT2j2@5GxOcR}6bjcoLNJ zk@g_M2i?nKfom`ukbDHJf?VL@(aQ*t4SWCzbT5| z3l2p+SmL0zG7pMzp_s%9YRA1^7MTV~fD1I4x$Yov3xjfStS-L6%6B#-1(>MfREj*) zb>F}i%cmy*1opTAHPGrnmk!6~<4A|Sz(+7A(%$ju@ev&{qi3hj8?vG*cM^ZJVGQe+RMGlI*(e*aNn;+l~8kfUUwgOFu~UY){ufp85=gg$(N$^gh-fyF$s0+8ukytnA#6K!Z7xksehfwzT33bv9CZ(i) z{_EM-UDM(yMEdE<;f!izyjvhhnm5q*O2 z*WoQ@V4`C-@2*b0-YW7hN^>cmMKD~!lLv%_k1cItpq0R2H8|UZWz?025fQgQMGI&u z0HP8=hOT^?fw_vEB7P@>GXXBMuC)EP{OfVCvA>N}avo!-RN_P~OP1}an(qVl_0glc zBCkNY?97pS2hPR+GsBVnpSibYMK^(*E&+zqY}ZFV9;ym5lJ+2;#|_)$Q?%FiE^UGH z;Dw3({mzxH9{ppi1f-=!)~3X5r<+CPnjxHX{(;_7;+<6Rq=m977O}D4yzGf4+I{ksaD+OZh8o zisI|XzT`uBfWX=Bb{JC1T+%#sN*p>g5f`Dm>YjUs$44aP^7N@7o@l!^Ao z|Nf-a-&Z0E|bh2cn7P5d!*_$dVG! 
zbcLN=r8@yR7B+f@+4T6l9;2T2e!)~F-e1K(Tj?BkGNbq5(D>6^5Q&wC+kmiQkSM1i zmTt1AAs6y9kj88E?P{L@@Dp@XF%KWw$g!aMq%NXwi6Vp7jv>?ZhCG9Q9_W5q_wp{N z{b5^ZQ#-V0P}rJ*5xfaKKHk`#Cq7X~ZXK+KoMy5!i_e;SR6Y^5enMtc39pX`WSie> z*&7s?JCw4;o5jt)wd$C+c+h&lJ@@kWDnE0PuOB;z&=_KB^&c+UjeortCjiBSoCnn> z2`8ziet1&i?9f~gw&6*I5Gxfgl}|4i8yf@1DTmx?v=oC8iMFdi*3+0u4hvYPcs~+W za{<@BuE%{8oQZj+!G3lD)WF7*Qql+On3)p!1_e`qTIjGf%T9)u@ejIKss{`tobR4I z!@GaAhwojR#k>6pt|vO&%t0_6Dh}1-rEPh_O4nJGr+Nb+&y_;=A+(TM_90GFq!b7i z6;k3n;__~wmV$E*(tcI%uI$H0Liy4P2W?|3q0eyjJy2bN2oU7xX@}TQp2}f_ygL+n z+~^f|=}u@fEF4qko+}De-%*p$y-ckKtbyJS<*(7dY|&gI%?n5j*Fv*aD7g**d4x^a z1Qn>55hY5;WdfLwM920B!nuxg4c5@TAl-nNMU?L3f^m@;=diW8VRNnnXko~gAr5#6 zrTB5g!$PE7C~+S(jBD(IV@)Jd9)J{7q&WrT+sNI{!ePtRWDLP=q-A_PW*_QLk75$P zNb?=Qh>@?o_XLHP5@@>G+RS&MjkXWRCs`tE;(-5C$VRRMwhx2BfUu8bEwWl@6*9>H zN+Tq`4q)XI9r~+OgLa`9ZV;Y-KjJBGU^{uINBt=Kc;jC{BW&bIj-5x9xljX7wb9Lg zN4S%j8HOp?1F~wLf$ps5(ZkCmpDCbwDpF(ariU}rTc+oAe0IH5w36UL7~)( zm?ZmlB(;^0XoU3;=4?=SpZIn{5mlr98c=mOlz@ICc-V$=$D-}dDiC_!=pa{ClbpUD zmropn9uM_4fG?rs_D0*SDiEMo1oHJ(q1*5U`9#mv@vQv@E0vI2b1y$agt#Fnlu-L5 z4&&5yteocd4o(-FLm0f7AT9r-z5_MDf%Ke#(~9M>jv%=u4)B`tNe08chFC|WD~2X2 zu^A|Fw%EkzgGcgx7X;kRhpjB5->F z2!7Dg%R;*!Zw7*77=IYfL}+E7(KN)nZC|;yF%tk-nbcpoUC?M_Sb7B#yug``Wt=WL zOK!UNohfs{4ut-0 zDB_ULojb)TY-wQH3__cDGYEh(t2bBmnbUEHcn3n{!TN|aC8B2AeHZYJD_F@GJJ6eV zB3OVp3kvkjATJLaY3FaqjW85~n?IsHcIJ>{TlF>KhX>kLTf&OS(T9z(4KUCFJ3GuVC$n z+c&Y5jm=w)9tKKlYr!(ZbrGk%Tz(}*9#Mzs`X~iRHy1};FHczbf*GNSvY`COU7mFrEb|_CIm|2}$7FzhxTUL=vV#-@iyGC9SKYQ2untr6GIHhw{U~ATotb z+^!%>KvuNZeSlIv8QvYPZ*}H#{MT&Rn>zhItgY4;B~x2m6M~`TA}=Pad(**XE(7pf zmbcF=e-Hd0*ye$wqH4Z4?xSMfV~6S};)S^;<8vNqn8ha$6b=C$xRo2=b>PXL%>yNE&2C<|p`AC;;*z;biE|g`N%}o49?GKJE zv>>vDFBagF;s|+8vQqYlEA)(klXC#@vi&z!eHoN87=q+Z4SV)Fql%oCMpB=kozBj? 
z+a+|TKagkZx#2ljPuAfGVWi5zQ8VuA*X*Go-VuB_q1>^inH6;>c;G-X??R-Bf%Gdg-;=_8Nh^Zl3*wx zJ>e|0Q59~RI6-0IF=z)&)o(KMpy8w|Egs~3svfcF#`v>?3+<}%TZ31*7qpbXI)GP+ z=-J!}zFrr&XxRnhFSw%h{B*o!^*hND7oGs$Ko_VO@%jUHp?u;pLVE)HQn2yLJ>=Y_ zShwz83}6PjEJpn(Vj7sO^?9RdY6+K+%sQ}P8_gIe+XDJLZ#T%%B*)2VrRo!Lrb)Yw zt1vv&T)1mkktwWs;lVbQk$!YFuPmzl-vfOcT)nFILN?eIef@4O!Ai}BGhbySal=K@ zY|SQTNTurlF>&$=tE=K@j9r^$*0okklQ!6iD25nhl&_$?AlcNzS;ur%4NC(^HNaj0`GGT@u)RkL4zddX)OXCX5~=UUhstcjJ)`!z%Vm}BvL)8Sr`XJGarpUY94$S%qJMrvuqbeC@Ll1hlm zx;i>fj~Ro^QvhC10ua|moAQ<(-;7f(Zf&{cX6O|Dj@^_8lXU+=^_=;H#vyRyFCjoF z)5y=3VgV;Xi5+DZK$!g?{@nKBnb6#%Tw$wm2TNx9+N#US%r$eVeBWNocW2JyTdVP> zL9~am(Cf!1brSOIWRc!N0|?(`4+njuH5ppmn77?GYR-*1IeHYcixj;o243T*j%^FOQZt@=Q4Dd@yaZeboQ=E3^_+#kyFS|R`>Wg6YqJA-U(KL$A_5yu2e`6ai%&{=u z8HQcrSfWv4pCl&k4xl%U`<>7&KIZV=-w6ceK?y%q{f2cdK>BGCx~$938|m^)QqF!+ zF+kssoIc!AnO`4t|6y^Ozw`#11LPVqFX^NfOX&Q%J@0+~pb~}lC5u0Mld(%)OH?K6 z=H%s3Zk~t-Ezc#99y~Km39>G!-yfIPX=&+KC(hnih(BSjHe+(=&7ZH;1WZLMce^ik zwTY{FS8Ce!aae6`CO&`l84d>#FJMN-Tz!-9#A73-;yu&5mO<~WBw>?W78lsq5^CL;nw%ILxE>xIm!e9Phbsh&=o z#;Li(a#%ka4q6d0!%t^X)IP}OQ4ATVdp@PAt^t^)CAM}PzW(e!hXhk__b7h?AQyv- z?`1Al$~^txycryAB1pgnQ>Y1$kdS~TSrKvCv&~xLG4F~dSKIs;of|m!~c74C@i!qhFX&5 z%liG}iUyEg163Ca@cR*kI6G=30n$*U!Cvjh)x}A;T&9C;v-0%w^v5`+#WmoQC|2bG z>W)4pNR)|zB--%#*^rPB0;Ny`Y>oy1m8sYpNczfmqtZ`+H6etWcPGo<`EOKM?40=% z>4NnM@(>V7gB(~Y3lhcy(RMPh2&naDC}u(L9K1^aur(wFMQRjOVE?ZbdQAPJL5Sc; zf2{w@Zx + +#include "CoreTypes.hpp" +#include "QirTypes.hpp" +#include "tracer.hpp" + +namespace Microsoft +{ +namespace Quantum +{ + extern thread_local std::shared_ptr tracer; +} +} // namespace Microsoft + +using namespace Microsoft::Quantum; +extern "C" +{ + void quantum__qis__on_operation_start(int64_t id) // NOLINT + { + } + void quantum__qis__on_operation_end(int64_t id) // NOLINT + { + } + + void quantum__qis__swap(Qubit q1, Qubit q2) // NOLINT + { + } + + void quantum__qis__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT + { + (void)tracer->TraceSingleQubitOp(id, duration, 
target); + } + void quantum__qis__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, Qubit target) // NOLINT + { + (void)tracer->TraceMultiQubitOp(id, duration, ctls->count, reinterpret_cast(ctls->buffer), 1, &target); + } + void quantum__qis__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT + { + (void)tracer->TraceMultiQubitOp( + id, duration, 0, nullptr, targets->count, reinterpret_cast(targets->buffer)); + } + void quantum__qis__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, QirArray* targets) // NOLINT + { + (void)tracer->TraceMultiQubitOp( + id, duration, ctls->count, reinterpret_cast(ctls->buffer), targets->count, + reinterpret_cast(targets->buffer)); + } + + void quantum__qis__inject_barrier(int32_t id, int32_t duration) // NOLINT + { + (void)tracer->InjectGlobalBarrier(id, duration); + } + + RESULT* quantum__qis__single_qubit_measure(int32_t id, int32_t duration, QUBIT* q) // NOLINT + { + return tracer->TraceSingleQubitMeasurement(id, duration, q); + } + + RESULT* quantum__qis__joint_measure(int32_t id, int32_t duration, QirArray* qs) // NOLINT + { + return tracer->TraceMultiQubitMeasurement(id, duration, qs->count, reinterpret_cast(qs->buffer)); + } +} \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp new file mode 100644 index 00000000000..c9e906f4a29 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -0,0 +1,309 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include +#include +#include + +#include "tracer.hpp" + +using namespace std; + +namespace Microsoft +{ +namespace Quantum +{ + thread_local std::shared_ptr tracer = nullptr; + std::shared_ptr CreateTracer(int preferredLayerDuration) + { + tracer = std::make_shared(preferredLayerDuration); + return tracer; + } + std::shared_ptr CreateTracer(int preferredLayerDuration, const std::unordered_map& opNames) + { + tracer = std::make_shared(preferredLayerDuration, opNames); + return tracer; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer's ISimulator implementation + //------------------------------------------------------------------------------------------------------------------ + Qubit CTracer::AllocateQubit() + { + size_t qubit = qubits.size(); + qubits.emplace_back(QubitState{}); + return reinterpret_cast(qubit); + } + + void CTracer::ReleaseQubit(Qubit /*qubit*/) + { + // nothing for now + } + + // TODO: what would be meaningful information we could printout for a qubit? + std::string CTracer::QubitToString(Qubit q) + { + size_t qubitIndex = reinterpret_cast(q); + const QubitState& qstate = this->UseQubit(q); + + stringstream str(std::to_string(qubitIndex)); + str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroDurationOps.size() << ")"; + return str.str(); + } + + void CTracer::ReleaseResult(Result /*result*/) + { + // nothing to do, we don't allocate results on measurement [yet] + } + + // Although the tracer should never compare results or get their values, it still has to implement UseZero and + // UseOne methods as they are invoked by the QIR initialization. 
+ Result CTracer::UseZero() + { + return reinterpret_cast(INVALID); + } + + Result CTracer::UseOne() + { + return reinterpret_cast(INVALID); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::CreateNewLayer + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::CreateNewLayer(Duration minRequiredDuration) + { + // Create a new layer for the operation. + Time layerStartTime = 0; + if (!this->metricsByLayer.empty()) + { + const Layer& lastLayer = this->metricsByLayer.back(); + layerStartTime = lastLayer.startTime + lastLayer.duration; + } + this->metricsByLayer.emplace_back( + Layer {layerStartTime, max(this->preferredLayerDuration, minRequiredDuration)}); + + return this->metricsByLayer.size() - 1; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::FindLayerToInsertOperationInto + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const + { + const QubitState& qstate = this->UseQubit(q); + + LayerId layerToInsertInto = INVALID; + + const LayerId firstLayerAfterBarrier = + this->globalBarrier == INVALID + ? this->metricsByLayer.empty() ? INVALID : 0 + : this->globalBarrier + 1 == this->metricsByLayer.size() ? 
INVALID : this->globalBarrier + 1; + + LayerId candidate = max(qstate.layer, firstLayerAfterBarrier); + + if (candidate != INVALID) + { + // Find the earliest layer that the operation fits in by duration + const Layer& candidateLayer = this->metricsByLayer[candidate]; + const Time lastUsedTime = max(qstate.lastUsedTime, candidateLayer.startTime); + if (lastUsedTime + opDuration <= candidateLayer.startTime + candidateLayer.duration) + { + layerToInsertInto = candidate; + } + else + { + for (candidate += 1; candidate < this->metricsByLayer.size(); ++candidate) + { + if (opDuration <= this->metricsByLayer[candidate].duration) + { + layerToInsertInto = candidate; + break; + } + } + } + } + else if (opDuration <= this->preferredLayerDuration) + { + layerToInsertInto = firstLayerAfterBarrier; + } + + return layerToInsertInto; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::AddOperationToLayer + //------------------------------------------------------------------------------------------------------------------ + void CTracer::AddOperationToLayer(OpId id, LayerId layer) + { + assert(layer < this->metricsByLayer.size()); + this->metricsByLayer[layer].operations[id] += 1; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::UpdateQubitState + //------------------------------------------------------------------------------------------------------------------ + void CTracer::UpdateQubitState(Qubit q, LayerId layer, Duration opDuration) + { + QubitState& qstate = this->UseQubit(q); + for (OpId idPending : qstate.pendingZeroDurationOps) + { + this->AddOperationToLayer(idPending, layer); + } + + // Update the qubit state. 
+ qstate.layer = layer; + const Time layerStart = this->metricsByLayer[layer].startTime; + qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration; + qstate.pendingZeroDurationOps.clear(); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceSingleQubitOp + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target) + { + this->seenOps.insert(id); + + QubitState& qstate = this->UseQubit(target); + if (opDuration == 0 && + (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier))) + { + qstate.pendingZeroDurationOps.push_back(id); + return INVALID; + } + + // Figure out the layer this operation should go into. + LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration); + if (layerToInsertInto == INVALID) + { + layerToInsertInto = this->CreateNewLayer(opDuration); + } + + // Add the operation and the pending zero-duration ones into the layer. + this->AddOperationToLayer(id, layerToInsertInto); + this->UpdateQubitState(target, layerToInsertInto, opDuration); + + return layerToInsertInto; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceControlledSingleQubitOp + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::TraceMultiQubitOp( + OpId id, + Duration opDuration, + long nFirstGroup, + Qubit* firstGroup, + long nSecondGroup, + Qubit* secondGroup) + { + assert(nFirstGroup >= 0); + assert(nSecondGroup > 0); + + // Special-casing operations of duration zero enables potentially better reuse of qubits, when we'll start + // optimizing for circuit width. 
However, tracking _the same_ pending operation across _multiple_ qubits is + // tricky and not worth the effort, so we only do single qubit case. + if (opDuration == 0 && nFirstGroup == 0 && nSecondGroup == 1) + { + return this->TraceSingleQubitOp(id, opDuration, secondGroup[0]); + } + + this->seenOps.insert(id); + + // Figure out the layer this operation should go into. + LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(secondGroup[0], opDuration); + for (long i = 1; i < nSecondGroup && layerToInsertInto != INVALID; i++) + { + layerToInsertInto = + max(layerToInsertInto, this->FindLayerToInsertOperationInto(secondGroup[i], opDuration)); + } + for (long i = 0; i < nFirstGroup && layerToInsertInto != INVALID; i++) + { + layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(firstGroup[i], opDuration)); + } + if (layerToInsertInto == INVALID) + { + layerToInsertInto = this->CreateNewLayer(opDuration); + } + + // Add the operation into the layer. + this->AddOperationToLayer(id, layerToInsertInto); + + // Update the state of the involved qubits. 
+ for (long i = 0; i < nFirstGroup; i++) + { + this->UpdateQubitState(firstGroup[i], layerToInsertInto, opDuration); + } + for (long i = 0; i < nSecondGroup; i++) + { + this->UpdateQubitState(secondGroup[i], layerToInsertInto, opDuration); + } + + return layerToInsertInto; + } + + LayerId CTracer::InjectGlobalBarrier(OpId id, Duration duration) + { + LayerId layer = this->CreateNewLayer(duration); + this->metricsByLayer[layer].barrierId = id; + this->globalBarrier = layer; + return layer; + } + + Result CTracer::TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target) + { + LayerId layerId = this->TraceSingleQubitOp(id, duration, target); + return reinterpret_cast(layerId); + } + + Result CTracer::TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets) + { + LayerId layerId = this->TraceMultiQubitOp(id, duration, 0, nullptr, nTargets, targets); + return reinterpret_cast(layerId); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::PrintLayerMetrics + //------------------------------------------------------------------------------------------------------------------ + static std::string GetOperationName(OpId opId, const std::unordered_map& opNames) + { + if (opId < 0) + { + return ""; + } + + auto nameIt = opNames.find(opId); + return nameIt == opNames.end() ? std::to_string(opId) : nameIt->second; + } + void CTracer::PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const + { + // Sort the operations by id so the output is deterministic. + std::set seenOpsOrderedById(this->seenOps.begin(), this->seenOps.end()); + + // header row + out << "layer_id" << separator << "name"; + for (OpId opId : seenOpsOrderedById) + { + out << separator << GetOperationName(opId, this->opNames); + } + out << std::endl; + + // data rows + const std::string zeroString = printZeroMetrics ? 
"0" : ""; + for (const Layer& layer : this->metricsByLayer) + { + out << layer.startTime; + out << separator << GetOperationName(layer.barrierId, this->opNames); + + for (OpId opId : seenOpsOrderedById) + { + auto foundInLayer = layer.operations.find(opId); + out << separator + << ((foundInLayer == layer.operations.end()) ? zeroString : std::to_string(foundInLayer->second)); + } + out << std::endl; + } + } +} // namespace Quantum +} // namespace Microsoft \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp new file mode 100644 index 00000000000..c5548e4bd18 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -0,0 +1,206 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#pragma once + +#include +#include +#include +#include +#include + +#include "CoreTypes.hpp" +#include "TracerTypes.hpp" +#include "QuantumApi_I.hpp" + +namespace Microsoft +{ +namespace Quantum +{ + /*================================================================================================================== + Layer + ==================================================================================================================*/ + struct Layer + { + // Start time of the layer. + const Time startTime; + + // Width of the layer on the time axis. + const Duration duration; + + // Quantum operations, assigned to this layer. + std::unordered_map operations; + + // Optional id, if the layer represents a global barrier. 
+ OpId barrierId = -1; + + Layer(Time startTime, Duration duration) + : startTime(startTime) + , duration(duration) + { + } + }; + + /*================================================================================================================== + QubitState + ==================================================================================================================*/ + struct QubitState + { + // The last layer this qubit was used in, `INVALID` means the qubit hasn't been used yet in any + // operations of non-zero duration. + LayerId layer = INVALID; + + // `lastUsedTime` stores the end time of the last operation the qubit participated in. It might not match the + // end time of a layer, if the duration of the last operation is less than the duration of the layer. Tracking this + // time allows us to possibly fit multiple short operations on the same qubit into a single layer. + Time lastUsedTime = 0; + + std::vector pendingZeroDurationOps; + }; + + /*================================================================================================================== + The tracer implements resource estimation. See readme in this folder for details. + ==================================================================================================================*/ + class CTracer : public ISimulator + { + // For now the tracer assumes no reuse of qubits. + std::vector qubits; + + // The preferred duration of a layer. An operation with longer duration will make the containing layer longer. + const int preferredLayerDuration = 0; + + // The index into the vector is treated as implicit id of the layer. + std::vector metricsByLayer; + + // The last global barrier, injected by the user. No new operations can be added to the barrier or to any of the + // layers that preceded it, even if the new operations involve completely new qubits.
+ LayerId globalBarrier = INVALID; + + // Mapping of operation ids to user-chosen names; for operations that the user didn't name, the output will use + // operation ids. + std::unordered_map opNames; + + // Operations we've seen so far (to be able to trim output to include only those that were encountered). + std::unordered_set seenOps; + + private: + QubitState& UseQubit(Qubit q) + { + size_t qubitIndex = reinterpret_cast(q); + assert(qubitIndex < this->qubits.size()); + return this->qubits[qubitIndex]; + } + const QubitState& UseQubit(Qubit q) const + { + size_t qubitIndex = reinterpret_cast(q); + assert(qubitIndex < this->qubits.size()); + return this->qubits[qubitIndex]; + } + + // If no appropriate layer is found, returns `INVALID`. + LayerId FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const; + + // Returns the index of the created layer. + LayerId CreateNewLayer(Duration minRequiredDuration); + + // Adds operation with given id into the given layer. Assumes that duration constraints have been satisfied.
+ void AddOperationToLayer(OpId id, LayerId layer); + + // Update the qubit state with the new layer information + void UpdateQubitState(Qubit q, LayerId layer, Duration opDuration); + + public: + explicit CTracer(int preferredLayerDuration) + : preferredLayerDuration(preferredLayerDuration) + { + } + + CTracer(int preferredLayerDuration, const std::unordered_map& opNames) + : preferredLayerDuration(preferredLayerDuration) + , opNames(opNames) + { + } + + // ------------------------------------------------------------------------------------------------------------- + // ISimulator interface + // ------------------------------------------------------------------------------------------------------------- + Qubit AllocateQubit() override; + void ReleaseQubit(Qubit qubit) override; + std::string QubitToString(Qubit qubit) override; + void ReleaseResult(Result result) override; + + IQuantumGateSet* AsQuantumGateSet() override + { + throw std::logic_error("Not supported: all intrinsics must be converted to tracing operations"); + } + IDiagnostics* AsDiagnostics() override + { + return nullptr; + } + Result M(Qubit target) override + { + throw std::logic_error("Not supported: all measurements must be converted to tracing operations"); + } + Result Measure(long numBases, PauliId bases[], long numTargets, Qubit targets[]) override + { + throw std::logic_error("Not supported: all measurements must be converted to tracing operations"); + } + bool AreEqualResults(Result r1, Result r2) override + { + throw std::logic_error("Cannot compare results while tracing!"); + } + ResultValue GetResultValue(Result result) override + { + throw std::logic_error("Result values aren't available while tracing!"); + } + Result UseZero() override; + Result UseOne() override; + + // ------------------------------------------------------------------------------------------------------------- + // Instead of implementing IQuantumGateSet, the tracer provides 'tracing-by-id' methods. 
The QIR generation + should translate all intrinsics to invoke these methods. + // The tracer doesn't differentiate between control and target qubits. However, while it could provide a single + // generic tracing method for an array of qubits, that would require the clients to copy control and target + // qubits into the same array. To avoid the copy, the tracer provides a method that takes two groups of qubits, + // where the first one can be empty or can be viewed as the set of controls. + // ------------------------------------------------------------------------------------------------------------- + LayerId TraceSingleQubitOp(OpId id, Duration duration, Qubit target); + LayerId TraceMultiQubitOp( + OpId id, + Duration duration, + long nFirstGroup, + Qubit* firstGroup, + long nSecondGroup, + Qubit* secondGroup); + + Result TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target); + Result TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets); + LayerId GetLayerIdOfSourceMeasurement(Result r) const + { + return reinterpret_cast(r); + } + + // ------------------------------------------------------------------------------------------------------------- + // Backing of the rest of the bridge methods. + // ------------------------------------------------------------------------------------------------------------- + LayerId InjectGlobalBarrier(OpId id, Duration duration); + + // ------------------------------------------------------------------------------------------------------------- + // Configuring the tracer and getting data back from it.
+ // ------------------------------------------------------------------------------------------------------------- + // Temporary method for initial testing + // TODO: replace with a safer accessor + const std::vector& UseLayers() + { + return this->metricsByLayer; + } + + void PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const; + }; + + std::shared_ptr CreateTracer(int preferredLayerDuration); + std::shared_ptr CreateTracer( + int preferredLayerDuration, + const std::unordered_map& opNames); + +} // namespace Quantum +} // namespace Microsoft \ No newline at end of file diff --git a/src/QirRuntime/public/CoreTypes.hpp b/src/QirRuntime/public/CoreTypes.hpp index 99319f8beaf..28abfb2a9eb 100644 --- a/src/QirRuntime/public/CoreTypes.hpp +++ b/src/QirRuntime/public/CoreTypes.hpp @@ -5,7 +5,6 @@ // The core types will be exposed in the C-interfaces for interop, thus no // namespaces or scoped enums can be used to define them. - /*============================================================================== Qubit & Result @@ -36,5 +35,3 @@ enum PauliId : int32_t PauliId_Z = 2, PauliId_Y = 3, }; - - diff --git a/src/QirRuntime/public/TracerTypes.hpp b/src/QirRuntime/public/TracerTypes.hpp new file mode 100644 index 00000000000..9b7d242f9f7 --- /dev/null +++ b/src/QirRuntime/public/TracerTypes.hpp @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+#pragma once + +#include +namespace Microsoft +{ +namespace Quantum +{ + using OpId = int; + using Time = int; + using Duration = int; + using LayerId = size_t; + + constexpr LayerId INVALID = std::numeric_limits::max(); +} +} \ No newline at end of file diff --git a/src/QirRuntime/test.py b/src/QirRuntime/test.py index 7c3d3450806..869eea365cb 100644 --- a/src/QirRuntime/test.py +++ b/src/QirRuntime/test.py @@ -20,80 +20,83 @@ def log(message): print(current_time + ": " + message) # ============================================================================= -root_dir = os.path.dirname(os.path.abspath(__file__)) +if __name__ == '__main__': + # this script is executed as script + root_dir = os.path.dirname(os.path.abspath(__file__)) -# parameters -flavor = "Debug" -nobuild = False -noqirgen = False -for arg in sys.argv: - arg = arg.lower() - if arg == "test.py": - continue - elif arg == "debug": - flavor = "Debug" - elif arg == "release": - flavor = "Release" - elif arg == "nobuild": - nobuild = True - noqirgen = True - elif arg == "noqirgen": - noqirgen = True - else: - log("unrecognized argument: " + arg) - sys.exit() + # parameters + flavor = "Debug" + nobuild = False + noqirgen = False + for arg in sys.argv: + arg = arg.lower() + if arg == "test.py": + continue + elif arg == "debug": + flavor = "Debug" + elif arg == "release": + flavor = "Release" + elif arg == "nobuild": + nobuild = True + noqirgen = True + elif arg == "noqirgen": + noqirgen = True + else: + log("unrecognized argument: " + arg) + sys.exit() -if not noqirgen: - if generateqir.do_generate_all(root_dir) != 0: - log("build failed to generate QIR => won't execute the tests") - log("to execute the tests from the last successful build run `test.py nobuild`") - sys.exit() + if not noqirgen: + if generateqir.do_generate_all(root_dir) != 0: + log("build failed to generate QIR => won't execute the tests") + log("to execute the tests from the last successful build run `test.py nobuild`") + sys.exit() 
-if not nobuild: - result = build.do_build(root_dir, True, True, flavor) # should_make, should_build - if result.returncode != 0: - log("build failed with exit code {0} => won't execute the tests".format(result.returncode)) - log("to execute the tests from the last successful build run `test.py nobuild`") - sys.exit() + if not nobuild: + result = build.do_build(root_dir, True, True, flavor) # should_make, should_build + if result.returncode != 0: + log("build failed with exit code {0} => won't execute the tests".format(result.returncode)) + log("to execute the tests from the last successful build run `test.py nobuild`") + sys.exit() -install_dir = os.path.join(root_dir, "build", platform.system(), flavor, "bin") -if not os.path.isdir(install_dir): - log("please build first: 'build.py [debug|release] [ir]'") - sys.exit() + install_dir = os.path.join(root_dir, "build", platform.system(), flavor, "bin") + if not os.path.isdir(install_dir): + log("please build first: 'build.py [debug|release] [ir]'") + sys.exit() -print("\n") + print("\n") -# Configure DLL lookup locations to include full state simulator and qdk -exe_ext = "" -fullstate_sim_dir = os.path.join(root_dir, "..", "Simulation", "Native", "build", flavor) -if platform.system() == "Windows": - exe_ext = ".exe" - os.environ['PATH'] = os.environ['PATH'] + ";" + fullstate_sim_dir + ";" + install_dir -else: - # add the folder to the list of locations to load libraries from - old = os.environ.get("LD_LIBRARY_PATH") - if old: - os.environ["LD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + # Configure DLL lookup locations to include full state simulator and qdk + exe_ext = "" + fullstate_sim_dir = os.path.join(root_dir, "..", "Simulation", "Native", "build", flavor) + if platform.system() == "Windows": + exe_ext = ".exe" + os.environ['PATH'] = os.environ['PATH'] + ";" + fullstate_sim_dir + ";" + install_dir else: - os.environ["LD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir + # add the 
folder to the list of locations to load libraries from + old = os.environ.get("LD_LIBRARY_PATH") + if old: + os.environ["LD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + else: + os.environ["LD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir - old = os.environ.get("DYLD_LIBRARY_PATH") - if old: - os.environ["DYLD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir - else: - os.environ["DYLD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir + old = os.environ.get("DYLD_LIBRARY_PATH") + if old: + os.environ["DYLD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + else: + os.environ["DYLD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir -log("========= Running native tests =========") -test_binaries = [ - "fullstate-simulator-tests", - "qir-runtime-unittests", - "qir-static-tests", - "qir-dynamic-tests" -] + log("========= Running native tests =========") + test_binaries = [ + "fullstate-simulator-tests", + "qir-runtime-unittests", + "qir-static-tests", + "qir-dynamic-tests", + "qir-tracer-tests" + ] -for name in test_binaries: - test_binary = os.path.join(install_dir, name + exe_ext) - log(test_binary) - subprocess.run(test_binary + " ~[skip]", shell = True) + for name in test_binaries: + test_binary = os.path.join(install_dir, name + exe_ext) + log(test_binary) + subprocess.run(test_binary + " ~[skip]", shell = True) -print("\n") \ No newline at end of file + print("\n") \ No newline at end of file diff --git a/src/QirRuntime/test/CMakeLists.txt b/src/QirRuntime/test/CMakeLists.txt index 8d2fac8c254..e5541f78589 100644 --- a/src/QirRuntime/test/CMakeLists.txt +++ b/src/QirRuntime/test/CMakeLists.txt @@ -1,4 +1,5 @@ add_subdirectory(FullstateSimulator) add_subdirectory(QIR-dynamic) add_subdirectory(QIR-static) +add_subdirectory(QIR-tracer) add_subdirectory(unittests) diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt new file mode 100644 
index 00000000000..e48b4c619f7 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt @@ -0,0 +1,28 @@ + +compile_from_qir(tracer-qir tracer_qir) + +#============================================================================== +# The executable target for QIR tests triggers the custom actions to compile ll files +# +add_executable(qir-tracer-tests + qir-tracer-driver.cpp + tracer-config.cpp +) + +target_link_libraries(qir-tracer-tests PUBLIC + ${QIR_UTILITY_LIB} # set by compile_from_qir + ${QIR_BRIDGE_UTILITY_LIB} + ${QIR_BRIDGE_TRACER_UTILITY_LIB} + tracer + qir-rt-support +) + +target_include_directories(qir-tracer-tests PUBLIC + "${test_includes}" + "${public_includes}" + "${PROJECT_SOURCE_DIR}/lib/Tracer" +) +add_dependencies(qir-tracer-tests tracer_qir) + +install(TARGETS qir-tracer-tests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") +add_unit_test(qir-tracer-tests) diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py new file mode 100644 index 00000000000..206afb6b6d6 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +import sys, os, platform, subprocess, datetime, shutil + +# ============================================================================= +# Generates QIR files for all *.qs files in this folder +# Accepts arguments: +# path to qsc.exe (absolute or rely on Path env) +# +# For example: "generate.py qsc.exe" or "generate.py c:\qsharp-compiler\qsc.exe" +# ============================================================================= + +# ============================================================================= +def log(message): + now = datetime.datetime.now() + current_time = now.strftime("%H:%M:%S") + print(current_time + ": " + message) +# ============================================================================= + +if __name__ == '__main__': + # this script is executed as script + root_dir = os.path.dirname(os.path.abspath(__file__)) + + # parameters + qsc = sys.argv[1] # argv[0] is the name of this script file + + # find all qs files in this folder + files_to_process = "" + output_file = "tracer-qir" + for file in os.listdir(root_dir): + (file_name, ext) = os.path.splitext(file) + if ext == ".qs": + files_to_process = files_to_process + " " + file + + # Compile as a lib so all functions are retained and don't have to workaround the current limitations of + # @EntryPoint attribute. + command = (qsc + " build --qir s --input " + files_to_process + " --proj " + output_file) + log("Executing: " + command) + subprocess.run(command, shell = True) + diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp new file mode 100644 index 00000000000..ba2fd6e361a --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include +#include + +#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file +#include "catch.hpp" + +#include "QirContext.hpp" +#include "tracer-config.hpp" +#include "tracer.hpp" + +using namespace std; +using namespace Microsoft::Quantum; + +namespace TracerUser +{ + +TEST_CASE("Invoke each intrinsic from Q# core once", "[qir-tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); + QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); + + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body()); + const vector& layers = tr->UseLayers(); + + std::stringstream out; + tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); + INFO(out.str()); + + // TestCoreIntrinsics happens to produce 24 layers right now and we are not checking whether that's expected -- as + // testing of layering logic is better done by unit tests. + CHECK(layers.size() == 24); +} + +TEST_CASE("Measurements can be counted but cannot be compared", "[qir-tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); + QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); + + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(false /*compare*/)); + CHECK(tr->UseLayers().size() == 1); + + REQUIRE_THROWS(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(true /*compare*/)); +} +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp new file mode 100644 index 00000000000..eab02e878af --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// TODO: ideally, this file should be generated by the Q# compiler alongside the qir, using the mappings specified in +// target.qs. 
+ +#include + +#include "QuantumApi_I.hpp" +#include "tracer-config.hpp" + +namespace TracerUser +{ +const std::unordered_map g_operationNames = { + {0, "X"}, {1, "CX"}, {2, "MCX"}, {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/}; +} diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp new file mode 100644 index 00000000000..8163231286d --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// TODO: ideally, this file should be generated by the Q# compiler alongside the qir + +#pragma once + +#include +#include + +#include "TracerTypes.hpp" + +namespace TracerUser +{ +extern const std::unordered_map g_operationNames; +} // namespace TracerUser + +// Available function in generated QIR +extern "C" void Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body(); // NOLINT +extern "C" void Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(bool compare); // NOLINT diff --git a/src/QirRuntime/test/QIR-tracer/tracer-core.qs b/src/QirRuntime/test/QIR-tracer/tracer-core.qs new file mode 100644 index 00000000000..84e57ae32e3 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-core.qs @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +namespace Microsoft.Quantum.Core { + + @Attribute() + newtype Attribute = Unit; + + @Attribute() + newtype Inline = Unit; + + @Attribute() + newtype EntryPoint = Unit; + + function Length<'T> (array : 'T[]) : Int { body intrinsic; } + + function RangeStart (range : Range) : Int { body intrinsic; } + + function RangeStep (range : Range) : Int { body intrinsic; } + + function RangeEnd (range : Range) : Int { body intrinsic; } + + function RangeReverse (range : Range) : Range { body intrinsic; } +} + +namespace Microsoft.Quantum.Targeting { + + @Attribute() + newtype TargetInstruction = String; +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs new file mode 100644 index 00000000000..1e23bf5f613 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.Tracer { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Tracer; + + operation TestCoreIntrinsics() : Unit { + use qs = Qubit[3]; + + X(qs[0]); + Y(qs[0]); + Z(qs[1]); + H(qs[1]); + CNOT(qs[1], qs[2]); + Rx(0.3, qs[0]); + Ry(0.4, qs[1]); + Rz(0.5, qs[2]); + //SWAP(qs[0], qs[2]); + S(qs[1]); + T(qs[2]); + + Barrier(42, 0); + + Adjoint X(qs[0]); + Adjoint Y(qs[0]); + Adjoint Z(qs[1]); + Adjoint H(qs[1]); + Adjoint CNOT(qs[1], qs[2]); + Adjoint Rx(0.3, qs[0]); + Adjoint Ry(0.4, qs[1]); + Adjoint Rz(0.5, qs[2]); + //Adjoint SWAP(qs[0], qs[2]); + Adjoint S(qs[1]); + Adjoint T(qs[2]); + + use c = Qubit() { + Controlled X([c], (qs[0])); + Controlled Y([c], (qs[0])); + Controlled Z([c], (qs[1])); + Controlled H([c], (qs[1])); + Controlled Rx([c], (0.3, qs[0])); + Controlled Ry([c], (0.4, qs[1])); + Controlled Rz([c], (0.5, qs[2])); + //Controlled SWAP([c], (qs[0], qs[2])); + Controlled S([c], (qs[1])); + Controlled T([c], (qs[2])); + } + + use cc = Qubit[2] { + 
Controlled X(cc, (qs[0])); + Controlled Y(cc, (qs[0])); + Controlled Z(cc, (qs[1])); + Controlled H(cc, (qs[1])); + Controlled Rx(cc, (0.3, qs[0])); + Controlled Ry(cc, (0.4, qs[1])); + Controlled Rz(cc, (0.5, qs[2])); + //Controlled SWAP(cc, (qs[0], qs[2])); + Controlled S(cc, (qs[1])); + Controlled T(cc, (qs[2])); + } + } +} diff --git a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs new file mode 100644 index 00000000000..7c4aab5eea1 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.Tracer { + open Microsoft.Quantum.Intrinsic; + + operation Fixup(qs : Qubit[]) : Unit { + for i in 0..Length(qs)-1 { + X(qs[i]); + } + } + + operation TestMeasurements(compare : Bool) : Unit { + use qs = Qubit[3]; + let r0 = M(qs[0]); + let qs12 = [qs[1], qs[2]]; + let r12 = Measure([PauliY, PauliX], qs12); + + if compare { + if r0 == Zero { + X(qs[1]); + } + + //ApplyIfOne(r12, (Fixup, qs12)); + } + } +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll new file mode 100644 index 00000000000..73d9cf34372 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -0,0 +1,1462 @@ + +%Result = type opaque +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Qubit = type opaque +%Array = type opaque +%String = type opaque + +@ResultZero = external global %Result* +@ResultOne = external global %Result* +@PauliI = constant i2 0 +@PauliX = constant i2 1 +@PauliY = constant i2 -1 +@PauliZ = constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } + +define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define %Tuple* 
@Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i64) + +declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + +define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +else__1: ; 
preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__single_qubit_measure(i64, i64, %Qubit*) + +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = load %Result*, 
%Result** @ResultOne + %res = alloca %Result* + store %Result* %0, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %3 = icmp sle i64 %i, %2 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %5 = bitcast i8* %4 to i2* + %6 = load i2, i2* %5 + %7 = load i2, i2* @PauliY + %8 = icmp eq i2 %6, %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %10 = bitcast i8* %9 to i2* + %11 = load i2, i2* %10 + %12 = load i2, i2* @PauliI + %13 = icmp eq i2 %11, %12 + %14 = or i1 %8, %13 + br i1 %14, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %15 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %16 = load i1, i1* %haveY + br i1 %16, label %then0__2, label %test1__1 + +then0__2: ; preds = %exit__1 + %17 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 1) + store %Result* %17, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 -1) + br label %continue__2 + +test1__1: ; preds = %exit__1 + %18 = icmp sgt i64 %1, 2 + br i1 %18, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %19 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qubits) + call void 
@__quantum__rt__result_update_reference_count(%Result* %19, i64 1) + %20 = load %Result*, %Result** %res + store %Result* %19, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %20, i64 -1) + br label %continue__2 + +test2__1: ; preds = %test1__1 + %21 = icmp eq i64 %1, 1 + br i1 %21, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = load i2, i2* %23 + %25 = load i2, i2* @PauliX + %26 = icmp eq i2 %24, %25 + br i1 %26, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %28 = bitcast i8* %27 to %Qubit** + %qb = load %Qubit*, %Qubit** %28 + %29 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 1) + %30 = load %Result*, %Result** %res + store %Result* %29, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %30, i64 -1) + br label %continue__3 + +else__1: ; preds = %then2__1 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %32 = bitcast i8* %31 to %Qubit** + %qb__1 = load %Qubit*, %Qubit** %32 + %33 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__1) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + %34 = load %Result*, %Result** %res + store %Result* %33, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label 
%continue__2 + +test3__1: ; preds = %test2__1 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = load i2, i2* %36 + %38 = load i2, i2* @PauliX + %39 = icmp eq i2 %37, %38 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %41 = bitcast i8* %40 to i2* + %42 = load i2, i2* %41 + %43 = load i2, i2* @PauliX + %44 = icmp eq i2 %42, %43 + %45 = and i1 %39, %44 + br i1 %45, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + %46 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 1) + %47 = load %Result*, %Result** %res + store %Result* %46, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) + br label %continue__2 + +test4__1: ; preds = %test3__1 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = load i2, i2* %49 + %51 = load i2, i2* @PauliX + %52 = icmp eq i2 %50, %51 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %54 = bitcast i8* %53 to i2* + %55 = load i2, i2* %54 + %56 = load i2, i2* @PauliZ + %57 = icmp eq i2 %55, %56 + %58 = and i1 %52, %57 + br i1 %58, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + %59 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 1) + %60 = load %Result*, %Result** %res + store %Result* %59, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) + br label %continue__2 + +test5__1: ; preds = %test4__1 + %61 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = load i2, i2* %62 + %64 = load i2, i2* @PauliZ + %65 = icmp eq i2 %63, %64 + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %67 = bitcast i8* %66 to i2* + %68 = load i2, i2* %67 + %69 = load i2, i2* @PauliX + %70 = icmp eq i2 %68, %69 + %71 = and i1 %65, %70 + br i1 %71, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + %72 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 1) + %73 = load %Result*, %Result** %res + store %Result* %72, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) + br label %continue__2 + +test6__1: ; preds = %test5__1 + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %75 = bitcast i8* %74 to i2* + %76 = load i2, i2* %75 + %77 = load i2, i2* @PauliZ + %78 = icmp eq i2 %76, %77 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %80 = bitcast i8* %79 to i2* + %81 = load i2, i2* %80 + %82 = load i2, i2* @PauliZ + %83 = icmp eq i2 %81, %82 + %84 = and i1 %78, %83 + br i1 %84, label %then6__1, label %continue__2 + +then6__1: ; preds = %test6__1 + %85 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 1) + %86 = load %Result*, %Result** %res + store %Result* %85, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + br label %continue__2 + +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %87 = load %Result*, 
%Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %87 +} + +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) + +define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mxx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mxz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mzx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mzz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 
= call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qb) { +entry: 
+ call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 
0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__ctl(%Array* %ctls, %Qubit* %qb) { 
+entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void 
@Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) 
+ %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + 
+else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Testing__Tracer__Fixup__body(%Array* %qs) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qs) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] + %2 = icmp sle i64 %i, %1 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %i) + %4 = bitcast i8* %3 to %Qubit** + %qb = load %Qubit*, %Qubit** %4 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %5 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { +entry: 
+ %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qb__1 = load %Qubit*, %Qubit** %3 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %5 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %7 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %15 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %17 = bitcast i8* %16 to %Qubit** + %qb__5 = load %Qubit*, %Qubit** %17 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %19 = bitcast i8* %18 to %Qubit** + %qb__6 = load %Qubit*, 
%Qubit** %19 + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %qb__7 = load %Qubit*, %Qubit** %21 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %23 = bitcast i8* %22 to %Qubit** + %qb__9 = load %Qubit*, %Qubit** %23 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) + call void @__quantum__qis__inject_barrier(i64 42, i64 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %25 = bitcast i8* %24 to %Qubit** + %qb__11 = load %Qubit*, %Qubit** %25 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %qb__12 = load %Qubit*, %Qubit** %27 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__12) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %qb__13 = load %Qubit*, %Qubit** %29 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__13) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %31 = bitcast i8* %30 to %Qubit** + %qb__14 = load %Qubit*, %Qubit** %31 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__14) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %39 = bitcast i8* %38 
to %Qubit** + %qb__15 = load %Qubit*, %Qubit** %39 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__15) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %41 = bitcast i8* %40 to %Qubit** + %qb__16 = load %Qubit*, %Qubit** %41 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__16) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %43 = bitcast i8* %42 to %Qubit** + %qb__17 = load %Qubit*, %Qubit** %43 + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb__17) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %45 = bitcast i8* %44 to %Qubit** + %qb__18 = load %Qubit*, %Qubit** %45 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__18) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %47 = bitcast i8* %46 to %Qubit** + %qb__20 = load %Qubit*, %Qubit** %47 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__20) + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %49 = bitcast i8* %48 to %Qubit** + store %Qubit* %c, %Qubit** %49 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %51 = bitcast i8* %50 to %Qubit** + %qb__22 = load %Qubit*, %Qubit** %51 + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb__22) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__22) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* 
%ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) + %53 = bitcast i8* %52 to %Qubit** + store %Qubit* %c, %Qubit** %53 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %55 = bitcast i8* %54 to %Qubit** + %qb__23 = load %Qubit*, %Qubit** %55 + br i1 true, label %then0__2, label %else__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__23) + br label %continue__2 + +else__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__23) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) + %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) + %57 = bitcast i8* %56 to %Qubit** + store %Qubit* %c, %Qubit** %57 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %59 = bitcast i8* %58 to %Qubit** + %qb__24 = load %Qubit*, %Qubit** %59 + br i1 true, label %then0__3, label %else__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__24) + br label %continue__3 + +else__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__24) + br label %continue__3 + +continue__3: ; preds = %else__3, %then0__3 + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) + %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) + %61 = bitcast i8* %60 to %Qubit** + store %Qubit* %c, %Qubit** %61 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %63 = bitcast i8* %62 to %Qubit** + %qb__25 = load %Qubit*, %Qubit** %63 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__25) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) + %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %c, %Qubit** %65 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %67 = bitcast i8* %66 to %Qubit** + %qb__26 = load %Qubit*, %Qubit** %67 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__26) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) + %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) + %69 = bitcast i8* %68 to %Qubit** + store %Qubit* %c, %Qubit** %69 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %71 = bitcast i8* %70 to %Qubit** + 
%qb__27 = load %Qubit*, %Qubit** %71 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__27) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) + %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %c, %Qubit** %73 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %75 = bitcast i8* %74 to %Qubit** + %qb__28 = load %Qubit*, %Qubit** %75 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__6, %Qubit* %qb__28) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) + %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) + %77 = bitcast i8* %76 to %Qubit** + store %Qubit* %c, %Qubit** %77 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %79 = bitcast i8* %78 to %Qubit** + %qb__29 = load %Qubit*, %Qubit** %79 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) + %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %80 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) + %81 = bitcast i8* %80 to %Qubit** + store %Qubit* %c, %Qubit** %81 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %83 = bitcast i8* %82 to %Qubit** + %qb__31 = load %Qubit*, %Qubit** %83 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__31) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__qubit_release(%Qubit* %c) + %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %85 = bitcast i8* %84 to %Qubit** + %qb__33 = load %Qubit*, %Qubit** %85 + %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) + %87 = icmp eq i64 %86, 1 + br i1 %87, label %then0__4, label %else__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__33) + br label %continue__4 + +else__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__33) + br label %continue__4 + +continue__4: ; preds = %else__4, %then0__4 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %89 = bitcast i8* %88 to %Qubit** + %qb__34 = load %Qubit*, %Qubit** %89 + %90 = 
icmp eq i64 %86, 1 + br i1 %90, label %then0__5, label %else__5 + +then0__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__34) + br label %continue__5 + +else__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__34) + br label %continue__5 + +continue__5: ; preds = %else__5, %then0__5 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %92 = bitcast i8* %91 to %Qubit** + %qb__35 = load %Qubit*, %Qubit** %92 + %93 = icmp eq i64 %86, 1 + br i1 %93, label %then0__6, label %else__6 + +then0__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__35) + br label %continue__6 + +else__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__35) + br label %continue__6 + +continue__6: ; preds = %else__6, %then0__6 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %95 = bitcast i8* %94 to %Qubit** + %qb__36 = load %Qubit*, %Qubit** %95 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__36) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %97 = bitcast i8* %96 to %Qubit** + %qb__37 = load %Qubit*, %Qubit** %97 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__37) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) 
+ call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %99 = bitcast i8* %98 to %Qubit** + %qb__38 = load %Qubit*, %Qubit** %99 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__38) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %101 = bitcast i8* %100 to %Qubit** + %qb__39 = load %Qubit*, %Qubit** %101 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__39) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %103 = bitcast i8* %102 to %Qubit** + %qb__40 = load %Qubit*, %Qubit** %103 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__40) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %105 = bitcast i8* %104 to %Qubit** + %qb__42 = load %Qubit*, %Qubit** %105 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__42) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %cc) + call void 
@__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__qis__inject_barrier(i64, i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(i1 %compare) { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10 + store %Qubit* %8, %Qubit** %3 + store %Qubit* %11, %Qubit** %5 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, 
i64 0) + %13 = bitcast i8* %12 to i2* + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %15 = bitcast i8* %14 to i2* + %16 = load i2, i2* @PauliY + %17 = load i2, i2* @PauliX + store i2 %16, i2* %13 + store i2 %17, i2* %15 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %18 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %18, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %31, %exiting__1 ] + %19 = icmp sle i64 %i, 1 + br i1 %19, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %21 = bitcast i8* %20 to i2* + %22 = load i2, i2* %21 + %23 = load i2, i2* @PauliY + %24 = icmp eq i2 %22, %23 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %26 = bitcast i8* %25 to i2* + %27 = load i2, i2* %26 + %28 = load i2, i2* @PauliI + %29 = icmp eq i2 %27, %28 + %30 = or i1 %24, %29 + br i1 %30, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %31 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %32 = load i1, i1* %haveY + br i1 %32, label %then0__2, label %test1__1 + +then0__2: ; preds = %exit__1 + %33 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + store %Result* %33, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %18, i64 -1) + br label %continue__2 + +test1__1: ; preds = %exit__1 + br i1 false, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %34 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 1) + %35 = load %Result*, %Result** %res + store %Result* %34, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %35, i64 -1) + br label %continue__2 + +test2__1: ; preds = %test1__1 + br i1 false, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %37 = bitcast i8* %36 to i2* + %38 = load i2, i2* %37 + %39 = load i2, i2* @PauliX + %40 = icmp eq i2 %38, %39 + br i1 %40, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %42 = bitcast i8* %41 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %42 + %43 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__2) + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) + %44 = load %Result*, %Result** %res + store %Result* %43, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) + br label %continue__3 + +else__1: ; preds = %then2__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %46 = bitcast i8* %45 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %46 + %47 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__3) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 1) + %48 = load %Result*, %Result** %res + 
store %Result* %47, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %48, i64 -1) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label %continue__2 + +test3__1: ; preds = %test2__1 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %50 = bitcast i8* %49 to i2* + %51 = load i2, i2* %50 + %52 = load i2, i2* @PauliX + %53 = icmp eq i2 %51, %52 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = load i2, i2* %55 + %57 = load i2, i2* @PauliX + %58 = icmp eq i2 %56, %57 + %59 = and i1 %53, %58 + br i1 %59, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + %60 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 1) + %61 = load %Result*, %Result** %res + store %Result* %60, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %61, i64 -1) + br label %continue__2 + +test4__1: ; preds = %test3__1 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %63 = bitcast i8* %62 to i2* + %64 = load i2, i2* %63 + %65 = load i2, i2* @PauliX + %66 = icmp eq i2 %64, %65 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %68 = bitcast i8* %67 to i2* + %69 = load i2, i2* %68 + %70 = load i2, i2* @PauliZ + %71 = icmp eq i2 %69, %70 + %72 = and i1 %66, %71 + br i1 %72, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + %73 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 1) + %74 = load %Result*, %Result** %res + store %Result* %73, %Result** 
%res + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + br label %continue__2 + +test5__1: ; preds = %test4__1 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = load i2, i2* %76 + %78 = load i2, i2* @PauliZ + %79 = icmp eq i2 %77, %78 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %81 = bitcast i8* %80 to i2* + %82 = load i2, i2* %81 + %83 = load i2, i2* @PauliX + %84 = icmp eq i2 %82, %83 + %85 = and i1 %79, %84 + br i1 %85, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + %86 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 1) + %87 = load %Result*, %Result** %res + store %Result* %86, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %87, i64 -1) + br label %continue__2 + +test6__1: ; preds = %test5__1 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = load i2, i2* %89 + %91 = load i2, i2* @PauliZ + %92 = icmp eq i2 %90, %91 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %94 = bitcast i8* %93 to i2* + %95 = load i2, i2* %94 + %96 = load i2, i2* @PauliZ + %97 = icmp eq i2 %95, %96 + %98 = and i1 %92, %97 + br i1 %98, label %then6__1, label %continue__2 + +then6__1: ; preds = %test6__1 + %99 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 1) + %100 = load %Result*, %Result** %res + store %Result* %99, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %100, i64 -1) + br label %continue__2 + +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %r12 = load %Result*, %Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) + br i1 %compare, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__2 + %101 = load %Result*, %Result** @ResultZero + %102 = call i1 @__quantum__rt__result_equal(%Result* %r0, %Result* %101) + br i1 %102, label %then0__5, label %continue__5 + +then0__5: ; preds = %then0__4 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %104 = bitcast i8* %103 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %104 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__4) + br label %continue__5 + +continue__5: ; preds = %then0__5, %then0__4 + br label %continue__4 + +continue__4: ; preds = %continue__5, %continue__2 + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) + ret void +} + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = 
bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr { %String* }, { %String* }* %1, i64 0, i32 0 + store %String* %__Item1__, %String** %2 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i64 1) + ret { %String* }* %1 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__string_update_reference_count(%String*, i64) diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs new file mode 100644 index 00000000000..19f7d51abde --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -0,0 +1,251 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Instructions { + + operation single_qubit_op(op_id: Int, duration: Int, qb : Qubit) : Unit { + body intrinsic; + } + + operation multi_qubit_op(op_id: Int, duration: Int, qbs : Qubit[]) : Unit { + body intrinsic; + } + + operation single_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit { + body intrinsic; + } + + operation multi_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { + body intrinsic; + } + + operation single_qubit_measure(op_id: Int, duration: Int, qb : Qubit) : Result { + body intrinsic; + } + + operation joint_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { + body intrinsic; + } + + // Operations, used in Hadamard frame tracking + @Inline() + operation Tz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(11, 1, qb); } + adjoint (...) { single_qubit_op(11, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(12, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(12, 1, ctls, qb); } + } + + @Inline() + operation Tx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(13, 1, qb); } + adjoint (...) { single_qubit_op(13, 1, qb); } + controlled (ctls, ...) 
{ single_qubit_op_ctl(14, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(14, 1, ctls, qb); } + } + + + @Inline() + operation Sz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(15, 1, qb); } + adjoint (...) { single_qubit_op(15, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(16, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(16, 1, ctls, qb); } + } + + @Inline() + operation Sx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(17, 1, qb); } + adjoint (...) { single_qubit_op(17, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(18, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(18, 1, ctls, qb); } + } + + @Inline() + operation Mz(qb : Qubit) : Result { + body (...) { return single_qubit_measure(100, 1, qb); } + } + + @Inline() + operation Mx(qb : Qubit) : Result { + body (...) { return single_qubit_measure(101, 1, qb); } + } + + @Inline() + operation Mzz(qubits : Qubit[]) : Result { + body (...) { return joint_measure(102, 1, qubits); } + } + + @Inline() + operation Mxz(qubits : Qubit[]) : Result { + body (...) { return joint_measure(103, 1, qubits); } + } + + @Inline() + operation Mzx(qubits : Qubit[]) : Result { + body (...) { return joint_measure(104, 1, qubits); } + } + + @Inline() + operation Mxx(qubits : Qubit[]) : Result { + body (...) { return joint_measure(105, 1, qubits); } + } +} + +namespace Microsoft.Quantum.Tracer { + + @TargetInstruction("inject_global_barrier") + operation Barrier(id : Int, duration : Int) : Unit { + body intrinsic; + } +} + +namespace Microsoft.Quantum.Intrinsic { + + open Microsoft.Quantum.Core; + open Microsoft.Quantum.Instructions as Phys; + open Microsoft.Quantum.Targeting; + + @Inline() + operation X(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(0, 1, qb); } + adjoint self; + controlled (ctls, ...) 
{ + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(2, 1, ctls, qb); } + } + } + + operation CNOT(control : Qubit, target : Qubit) : Unit + is Adj + Ctl { + body (...) { Controlled X([control], target); } + adjoint self; + controlled (ctls, ...) { Controlled X(ctls + [control], target); } + } + + @Inline() + operation Y(qb : Qubit) : Unit + is Adj + Ctl{ + body (...) { Phys.single_qubit_op(3, 1, qb); } + adjoint self; + controlled (ctls, ...) { + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(5, 1, ctls, qb); } + } + } + + @Inline() + operation Z(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(6, 1, qb); } + adjoint self; + controlled (ctls, ...) { + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(8, 1, ctls, qb); } + } + } + + @Inline() + operation H(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(9, 1, qb); } + adjoint self; + controlled (ctls, ...) { Phys.single_qubit_op_ctl(10, 1, ctls, qb); } + } + + @Inline() + operation T(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.Tz(qb); } + adjoint (...) { Adjoint Phys.Tz(qb); } + controlled (ctls, ...) { Controlled Phys.Tz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Phys.Tz(ctls, qb); } + } + + @Inline() + operation S(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.Sz(qb); } + adjoint (...) { Adjoint Phys.Sz(qb); } + controlled (ctls, ...) { Controlled Phys.Sz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Phys.Sz(ctls, qb); } + } + + @Inline() + operation Rx(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(19, 1, qb); } + adjoint (...) { Phys.single_qubit_op(19, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(20, 1, ctls, qb); } + controlled adjoint (ctls, ...) 
{ Phys.single_qubit_op_ctl(20, 1, ctls, qb); } + } + + @Inline() + operation Ry(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(21, 1, qb); } + adjoint (...) { Phys.single_qubit_op(21, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(22, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(22, 1, ctls, qb); } + } + + @Inline() + operation Rz(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(23, 1, qb); } + adjoint (...) { Phys.single_qubit_op(24, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + } + + @Inline() + operation M(qb : Qubit) : Result { + body (...) { return Phys.Mz(qb); } + } + + @Inline() + operation Measure(paulis : Pauli[], qubits : Qubit[]) : Result { + body (...) { + mutable res = One; + mutable haveY = false; + // Measurements that involve PauliY or PauliI + for i in 0..Length(paulis)-1 { + if paulis[i] == PauliY or paulis[i] == PauliI { + set haveY = true; + } + } + if haveY { set res = Phys.joint_measure(106, 1, qubits); } + + // More than two qubits (but no PauliY or PauliI) + elif Length(paulis) > 2 { set res = Phys.joint_measure(107, 1, qubits); } + + // Single qubit measurement -- differentiate between Mx and Mz + elif Length(paulis) == 1 { + if (paulis[0] == PauliX) { set res = Phys.Mx(qubits[0]); } + else { set res = Phys.Mz(qubits[0]); } + } + + // Specialize for two-qubit measurements: Mxx, Mxz, Mzx, Mzz + elif paulis[0] == PauliX and paulis[1] == PauliX { set res = Phys.Mxx(qubits); } + elif paulis[0] == PauliX and paulis[1] == PauliZ { set res = Phys.Mxz(qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliX { set res = Phys.Mzx(qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliZ { set res = Phys.Mzz(qubits); } + + //shouldn't get here + return res; + } + } + + // operation SWAP(a : Qubit, b : Qubit) : 
Unit + // is Adj { + // body intrinsic; + // adjoint self; + // } + + +} diff --git a/src/QirRuntime/test/unittests/CMakeLists.txt b/src/QirRuntime/test/unittests/CMakeLists.txt index a6fb9a30662..22b2bc2802f 100644 --- a/src/QirRuntime/test/unittests/CMakeLists.txt +++ b/src/QirRuntime/test/unittests/CMakeLists.txt @@ -5,18 +5,21 @@ add_executable(qir-runtime-unittests driver.cpp QirRuntimeTests.cpp ToffoliTests.cpp + TracerTests.cpp ) target_link_libraries(qir-runtime-unittests PUBLIC qir-rt-support qir-qis-support simulators + tracer ) target_include_directories(qir-runtime-unittests PUBLIC "${test_includes}" ${public_includes} "${PROJECT_SOURCE_DIR}/lib/QIR" + "${PROJECT_SOURCE_DIR}/lib/Tracer" ) install(TARGETS qir-runtime-unittests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") add_unit_test(qir-runtime-unittests) diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp new file mode 100644 index 00000000000..37f8bc57ad1 --- /dev/null +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -0,0 +1,305 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include +#include +#include + +#include "catch.hpp" + +#include "CoreTypes.hpp" +#include "tracer.hpp" + +using namespace std; +using namespace Microsoft::Quantum; + +TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created + CHECK(0 == tr->TraceSingleQubitOp(2, 2, q1)); // add the op into L(0,3) + CHECK(0 == tr->TraceSingleQubitOp(3, 1, q2)); // add the op into L(0,3) + CHECK(1 == tr->TraceSingleQubitOp(4, 3, q2)); // create new layer L(3,3) + CHECK(2 == tr->TraceSingleQubitOp(5, 4, q2)); // long op! 
create new layer L(6,4) + CHECK(1 == tr->TraceSingleQubitOp(6, 2, q1)); // add the op into L(3,3) + CHECK(0 == tr->TraceSingleQubitOp(7, 1, q3)); // add the op into L(0,3) + CHECK(2 == tr->TraceSingleQubitOp(8, 4, q3)); // long op! but fits into existing L(6,4) + CHECK(3 == tr->TraceSingleQubitOp(9, 5, q1)); // long op! add the op into L(10,5) + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 4); + CHECK(layers[0].startTime == 0); + CHECK(layers[0].operations.size() == 4); + CHECK(layers[1].startTime == 3); + CHECK(layers[1].operations.size() == 2); + CHECK(layers[2].startTime == 6); + CHECK(layers[2].operations.size() == 2); + CHECK(layers[3].startTime == 10); + CHECK(layers[3].operations.size() == 1); +} + +TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created + CHECK(0 == tr->TraceSingleQubitOp(2, 0, q1)); // add the op into L(0,3) + CHECK(INVALID == tr->TraceSingleQubitOp(3, 0, q3)); // pending zero op (will remain orphan) + CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q2)); // pending zero op + CHECK(INVALID == tr->TraceSingleQubitOp(5, 0, q2)); // another pending zero op + CHECK(0 == tr->TraceSingleQubitOp(6, 1, q2)); // add the op into L(0,3) together with the pending ones + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 5); +} + +TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + Qubit q5 = tr->AllocateQubit(); + Qubit q6 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceMultiQubitOp(1, 1, 1 
/*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q2 /*second*/)); + CHECK(0 == tr->TraceMultiQubitOp(2, 2, 0 /*nFirst*/, nullptr /*first*/, 1 /*nSecond*/, &q2 /*second*/)); + // q2 now is at the limit of the layer duration + + Qubit qs12[2] = {q1, q2}; + CHECK(1 == tr->TraceMultiQubitOp(3, 1, 0 /*nFirst*/, nullptr /*first*/, 2 /*nSecond*/, qs12 /*second*/)); + CHECK(1 == tr->TraceMultiQubitOp(4, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); + // because of q2, both ops should have been added to a new layer, which now "catches" q1, q2, q3 + + CHECK(0 == tr->TraceMultiQubitOp(5, 0, 1 /*nFirst*/, &q4 /*first*/, 1 /*nSecond*/, &q5 /*second*/)); + CHECK(0 == tr->TraceSingleQubitOp(6, 1, q6)); + // these ops should fall through into the first layer (notice no special handling of duration zero) + + CHECK(1 == tr->TraceMultiQubitOp(7, 1, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q6 /*second*/)); + CHECK(1 == tr->TraceMultiQubitOp(8, 1, 1 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q4 /*second*/)); + // because of q1 and q3, these ops should be added into the second layer, which now has all but q5 + + CHECK(0 == tr->TraceSingleQubitOp(9, 1, q5)); + // should fall through to the first layer + + Qubit qs46[2] = {q4, q6}; + CHECK(1 == tr->TraceMultiQubitOp(10, 1, 2 /*nFirst*/, qs46 /*first*/, 1 /*nSecond*/, &q5 /*second*/)); + // because of the controls, should be added into the second layer + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 2); + + CHECK(layers[0].operations.size() == 5); + const auto& ops0 = layers[0].operations; + CHECK(ops0.find(1) != ops0.end()); + CHECK(ops0.find(2) != ops0.end()); + CHECK(ops0.find(5) != ops0.end()); + CHECK(ops0.find(6) != ops0.end()); + CHECK(ops0.find(9) != ops0.end()); + + CHECK(layers[1].operations.size() == 5); + const auto& ops1 = layers[1].operations; + CHECK(ops1.find(3) != ops1.end()); + CHECK(ops1.find(4) != ops1.end()); + CHECK(ops1.find(7) != ops1.end()); + CHECK(ops1.find(8) != 
ops1.end()); + CHECK(ops1.find(10) != ops1.end()); +} + +// TODO: add multi-qubit ops +TEST_CASE("Operations with same id are counted together", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + // All of these ops should fit into a single layer L(0,3) + tr->TraceSingleQubitOp(1, 1, q1); + tr->TraceSingleQubitOp(2, 2, q1); + tr->TraceSingleQubitOp(1, 1, q2); + tr->TraceSingleQubitOp(2, 1, q2); + tr->TraceSingleQubitOp(1, 1, q2); + tr->TraceSingleQubitOp(3, 2, q3); + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 3); + const auto& ops = layers[0].operations; + CHECK(ops.find(1)->second == 3); + CHECK(ops.find(2)->second == 2); + CHECK(ops.find(3)->second == 1); +} + +TEST_CASE("Global barrier", "[tracer]") +{ + shared_ptr tr = CreateTracer(2 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 4, q1)); // L(0,4) created + CHECK(0 == tr->TraceSingleQubitOp(2, 1, q4)); // added to L(0,4) + CHECK(1 == tr->InjectGlobalBarrier(42, 1)); // creates L(4,2) + + CHECK(2 == tr->TraceMultiQubitOp(3, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); + // the barrier shouldn't allow this op to fall through into L(0,4), so should create L(6,2) + + CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q1)); + // the barrier shouldn't allow this op to fall through into L(0,4), so should create pending op + + CHECK(2 == tr->TraceSingleQubitOp(5, 1, q1)); + // should be added into L(6,2) together with the pending op `3` + + CHECK(3 == tr->TraceSingleQubitOp(6, 3, q2)); + // long op, with no existing wide layers to host it, so should create L(8,3) + + CHECK(3 == tr->TraceSingleQubitOp(7, 3, q4)); + // long op but can be added into L(8,3), which 
is post the barrier + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 4); + CHECK(layers[0].operations.size() == 2); + CHECK(layers[1].operations.size() == 0); + CHECK(layers[2].operations.size() == 3); + CHECK(layers[3].operations.size() == 2); + + const auto& ops0 = layers[0].operations; + CHECK(ops0.find(1) != ops0.end()); + CHECK(ops0.find(2) != ops0.end()); + + CHECK(42 == layers[1].barrierId); + + const auto& ops2 = layers[2].operations; + CHECK(ops2.find(3) != ops2.end()); + CHECK(ops2.find(4) != ops2.end()); + CHECK(ops2.find(5) != ops2.end()); + + const auto& ops3 = layers[3].operations; + CHECK(ops3.find(6) != ops3.end()); + CHECK(ops3.find(7) != ops3.end()); +} + +// For layering purposes, measurements behave pretty much the same as other operations +TEST_CASE("Layering measurements", "[tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + + CHECK(0 == tr->GetLayerIdOfSourceMeasurement(tr->TraceSingleQubitMeasurement(1, 1, q1))); + Qubit qs12[2] = {q1, q2}; + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(2, 1, 2, qs12))); + CHECK(0 == tr->TraceSingleQubitOp(3, 1, q4)); + CHECK(0 == tr->GetLayerIdOfSourceMeasurement(tr->TraceSingleQubitMeasurement(4, 1, q3))); + Qubit qs23[2] = {q2, q3}; + CHECK(2 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(5, 1, 2, qs23))); + CHECK(1 == tr->TraceSingleQubitOp(3, 1, q4)); + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 3); + CHECK(layers[0].operations.size() == 3); + CHECK(layers[1].operations.size() == 2); + CHECK(layers[2].operations.size() == 1); +} + +TEST_CASE("Output: to string", "[tracer]") +{ + std::unordered_map opNames = {{1, "X"}, {2, "Y"}, {3, "Z"}, {4, "b"}}; + shared_ptr tr = CreateTracer(1 /*layer duration*/, opNames); + + Qubit q1 = tr->AllocateQubit(); + 
tr->TraceSingleQubitOp(3, 1, q1); + tr->TraceSingleQubitOp(5, 1, q1); + tr->InjectGlobalBarrier(4, 2); + tr->TraceSingleQubitOp(3, 4, q1); + tr->TraceSingleQubitOp(2, 1, q1); + + { + std::stringstream out; + tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); + std::string metrics = out.str(); + + std::stringstream expected; + expected << "layer_id,name,Y,Z,5" << std::endl; + expected << "0,,0,1,0" << std::endl; + expected << "1,,0,0,1" << std::endl; + expected << "2,b,0,0,0" << std::endl; + expected << "4,,0,1,0" << std::endl; + expected << "8,,1,0,0" << std::endl; + + INFO(metrics); + CHECK(metrics == expected.str()); + } + + { + std::stringstream out; + tr->PrintLayerMetrics(out, ",", false /*printZeroMetrics*/); + std::string metrics = out.str(); + + std::stringstream expected; + expected << "layer_id,name,Y,Z,5" << std::endl; + expected << "0,,,1," << std::endl; + expected << "1,,,,1" << std::endl; + expected << "2,b,,," << std::endl; + expected << "4,,,1," << std::endl; + expected << "8,,1,," << std::endl; + + INFO(metrics); + CHECK(metrics == expected.str()); + } +} + +TEST_CASE("Output: to file", "[tracer]") +{ + std::unordered_map opNames = {{1, "X"}, {2, "Y"}, {3, "Z"}, {4, "b"}}; + shared_ptr tr = CreateTracer(1 /*layer duration*/, opNames); + + Qubit q1 = tr->AllocateQubit(); + tr->TraceSingleQubitOp(3, 1, q1); + tr->TraceSingleQubitOp(5, 1, q1); + tr->InjectGlobalBarrier(4, 2); + tr->TraceSingleQubitOp(3, 4, q1); + tr->TraceSingleQubitOp(2, 1, q1); + + const std::string fileName = "tracer-test.txt"; + std::ofstream out; + out.open(fileName); + tr->PrintLayerMetrics(out, "\t", false /*printZeroMetrics*/); + out.close(); + + std::ifstream in(fileName); + string line; + REQUIRE(in.is_open()); + std::string metrics(std::istreambuf_iterator{in}, {}); + in.close(); + + std::stringstream expected; + expected << "layer_id\tname\tY\tZ\t5" << std::endl; + expected << "0\t\t\t1\t" << std::endl; + expected << "1\t\t\t\t1" << std::endl; + expected << 
"2\tb\t\t\t" << std::endl; + expected << "4\t\t\t1\t" << std::endl; + expected << "8\t\t1\t\t" << std::endl; + + INFO(metrics); + CHECK(metrics == expected.str()); +} From 71e6bb374afd1c9e36c523a0ed09ee23c00d13c9 Mon Sep 17 00:00:00 2001 From: Robin Kuzmin Date: Tue, 23 Feb 2021 10:43:59 -0800 Subject: [PATCH 11/30] Added a few more math funcs (#526) * Added Sin(). * Added Cos(). * Added Tan(). * Added Sinh(), Cosh(), Tanh(). * Added tests. * Added IEEERemainder(). * Added test. * Added ArcSin(), ArcCos(), ArcTan(), and tests. --- src/QirRuntime/lib/QIR/bridge-qis.ll | 82 +++++ src/QirRuntime/lib/QIR/intrinsicsMath.cpp | 30 ++ src/QirRuntime/lib/QIR/quantum__qis.hpp | 7 + .../test/QIR-static/qir-test-math.cpp | 60 ++++ .../test/QIR-static/qsharp/qir-gen.csproj | 2 +- .../test/QIR-static/qsharp/qir-test-arrays.qs | 10 + .../test/QIR-static/qsharp/qir-test-math.qs | 293 +++++++++++++++++- 7 files changed, 482 insertions(+), 2 deletions(-) diff --git a/src/QirRuntime/lib/QIR/bridge-qis.ll b/src/QirRuntime/lib/QIR/bridge-qis.ll index 729b8c77e2e..6566d652351 100644 --- a/src/QirRuntime/lib/QIR/bridge-qis.ll +++ b/src/QirRuntime/lib/QIR/bridge-qis.ll @@ -296,12 +296,20 @@ define void @__quantum__qis__message__body(%String* %.str) { ; LLVM intrinsics (https://llvm.org/docs/LangRef.html): declare double @llvm.sqrt.f64(double %.val) declare double @llvm.log.f64(double %Val) +declare double @llvm.sin.f64(double %Val) +declare double @llvm.cos.f64(double %Val) ; Native implementations: declare i1 @quantum__qis__isnan__body(double %d) declare double @quantum__qis__infinity__body() declare i1 @quantum__qis__isinf__body(double %d) declare double @quantum__qis__arctan2__body(double %y, double %x) +declare double @quantum__qis__sinh__body(double %theta) +declare double @quantum__qis__cosh__body(double %theta) +declare double @quantum__qis__arcsin__body(double %theta) +declare double @quantum__qis__arccos__body(double %theta) +declare double @quantum__qis__arctan__body(double 
%theta) +declare double @quantum__qis__ieeeremainder__body(double %y, double %x) declare i64 @quantum__qis__drawrandomint__body(i64 %min, i64 %max) ; API for the user code: @@ -348,6 +356,80 @@ define double @__quantum__qis__arctan2__body(double %y, double %x) { ; Q#: func ret double %result } +; function Sin (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.sin +define double @__quantum__qis__sin__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/sin + %result = call double @llvm.sin.f64(double %theta) ; https://llvm.org/docs/LangRef.html#llvm-sin-intrinsic + ret double %result +} + +; function Cos (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.cos +define double @__quantum__qis__cos__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/cos + %result = call double @llvm.cos.f64(double %theta) ; https://llvm.org/docs/LangRef.html#llvm-cos-intrinsic + ret double %result +} + +; function Tan (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.tan +define double @__quantum__qis__tan__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/tan + %sin = call double @llvm.sin.f64(double %theta) + %cos = call double @llvm.cos.f64(double %theta) + %result = fdiv double %sin, %cos ; tg(x) = sin(x) / cos(x) + ret double %result +} + +; function Sinh (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.sinh +define double @__quantum__qis__sinh__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/sinh + %result = call double @quantum__qis__sinh__body(double %theta) + ret double %result +} + +; function Cosh (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.cosh +define double @__quantum__qis__cosh__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/cosh + 
%result = call double @quantum__qis__cosh__body(double %theta) + ret double %result +} + +; function Tanh (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.tanh +define double @__quantum__qis__tanh__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/tanh + %sin = call double @__quantum__qis__sinh__body(double %theta) + %cos = call double @__quantum__qis__cosh__body(double %theta) + %result = fdiv double %sin, %cos ; tanh(x) = sinh(x) / cosh(x) + ret double %result +} + +; function ArcSin (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.arcsin +define double @__quantum__qis__arcsin__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/asin + %result = call double @quantum__qis__arcsin__body(double %theta) + ret double %result +} + +; function ArcCos (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.arccos +define double @__quantum__qis__arccos__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/acos + %result = call double @quantum__qis__arccos__body(double %theta) + ret double %result +} + +; function ArcTan (theta : Double) : Double +; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.math.arctan +define double @__quantum__qis__arctan__body(double %theta) { ; https://en.cppreference.com/w/cpp/numeric/math/atan + %result = call double @quantum__qis__arctan__body(double %theta) + ret double %result +} + + +; function IEEERemainder(x : Double, y : Double) : Double +define double @__quantum__qis__ieeeremainder__body(double %x, double %y) { + %result = call double @quantum__qis__ieeeremainder__body(double %x, double %y) + ret double %result +} + ; operation DrawRandomInt (min : Int, max : Int) : Int ; https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.random.drawrandomint diff --git a/src/QirRuntime/lib/QIR/intrinsicsMath.cpp 
b/src/QirRuntime/lib/QIR/intrinsicsMath.cpp index fedfbd6ba7b..41180ace397 100644 --- a/src/QirRuntime/lib/QIR/intrinsicsMath.cpp +++ b/src/QirRuntime/lib/QIR/intrinsicsMath.cpp @@ -40,6 +40,36 @@ double quantum__qis__arctan2__body(double y, double x) return std::atan2(y, x); // https://en.cppreference.com/w/cpp/numeric/math/atan2 } +double quantum__qis__sinh__body(double theta) +{ + return std::sinh(theta); +} + +double quantum__qis__cosh__body(double theta) +{ + return std::cosh(theta); +} + +double quantum__qis__arcsin__body(double theta) +{ + return std::asin(theta); // https://en.cppreference.com/w/cpp/numeric/math/asin +} + +double quantum__qis__arccos__body(double theta) +{ + return std::acos(theta); // https://en.cppreference.com/w/cpp/numeric/math/acos +} + +double quantum__qis__arctan__body(double theta) +{ + return std::atan(theta); // https://en.cppreference.com/w/cpp/numeric/math/atan +} + +double quantum__qis__ieeeremainder__body(double x, double y) +{ + return std::remainder(x, y); // https://en.cppreference.com/w/cpp/numeric/math/remainder +} + int64_t quantum__qis__drawrandomint__body(int64_t minimum, int64_t maximum) { if(minimum > maximum) diff --git a/src/QirRuntime/lib/QIR/quantum__qis.hpp b/src/QirRuntime/lib/QIR/quantum__qis.hpp index 8e80cd80404..e22e1937f96 100644 --- a/src/QirRuntime/lib/QIR/quantum__qis.hpp +++ b/src/QirRuntime/lib/QIR/quantum__qis.hpp @@ -67,6 +67,13 @@ extern "C" QIR_SHARED_API double quantum__qis__infinity__body(); // NOLINT QIR_SHARED_API bool quantum__qis__isinf__body(double d); // NOLINT QIR_SHARED_API double quantum__qis__arctan2__body(double y, double x); // NOLINT + QIR_SHARED_API double quantum__qis__sinh__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__cosh__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__arcsin__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__arccos__body(double theta); // NOLINT + QIR_SHARED_API double 
quantum__qis__arctan__body(double theta); // NOLINT + + QIR_SHARED_API double quantum__qis__ieeeremainder__body(double x, double y); // NOLINT QIR_SHARED_API int64_t quantum__qis__drawrandomint__body(int64_t minimum, int64_t maximum); // NOLINT } \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-static/qir-test-math.cpp b/src/QirRuntime/test/QIR-static/qir-test-math.cpp index f1007acd73e..6995f4ac285 100644 --- a/src/QirRuntime/test/QIR-static/qir-test-math.cpp +++ b/src/QirRuntime/test/QIR-static/qir-test-math.cpp @@ -11,6 +11,16 @@ extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body(); // NOLINT extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__LogTest__body(); // NOLINT extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__SinTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__CosTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__TanTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcSinTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcCosTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__ArcTanTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__SinhTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__CoshTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__TanhTest__body(); // NOLINT +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__IeeeRemainderTest__body(); // NOLINT extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Math__TestDrawRandomInt__body(int64_t min, int64_t max); // NOLINT TEST_CASE("QIR: Math.Sqrt", "[qir.math][qir.Math.Sqrt]") @@ -28,6 +38,56 @@ TEST_CASE("QIR: Math.ArcTan2", "[qir.math][qir.Math.ArcTan2]") REQUIRE(0 == 
Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body()); } +TEST_CASE("QIR: Math.Sin", "[qir.math][qir.Math.Sin]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__SinTest__body()); +} + +TEST_CASE("QIR: Math.Cos", "[qir.math][qir.Math.Cos]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__CosTest__body()); +} + +TEST_CASE("QIR: Math.Tan", "[qir.math][qir.Math.Tan]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__TanTest__body()); +} + +TEST_CASE("QIR: Math.ArcSin", "[qir.math][qir.Math.ArcSin]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__ArcSinTest__body()); +} + +TEST_CASE("QIR: Math.ArcCos", "[qir.math][qir.Math.ArcCos]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__ArcCosTest__body()); +} + +TEST_CASE("QIR: Math.ArcTan", "[qir.math][qir.Math.ArcTan]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__ArcTanTest__body()); +} + +TEST_CASE("QIR: Math.Sinh", "[qir.math][qir.Math.Sinh]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__SinhTest__body()); +} + +TEST_CASE("QIR: Math.Cosh", "[qir.math][qir.Math.Cosh]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__CoshTest__body()); +} + +TEST_CASE("QIR: Math.Tanh", "[qir.math][qir.Math.Tanh]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__TanhTest__body()); +} + +TEST_CASE("QIR: Math.IeeeRemainder", "[qir.math][qir.Math.IeeeRemainder]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Math__IeeeRemainderTest__body()); +} + TEST_CASE("QIR: Math.DrawRandomInt", "[qir.math][qir.Math.DrawRandomInt]") { // Test that the Q# random number generator is a wrapper around the C++ generator: diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj index 17f43b9b0c8..16a4473fd18 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj @@ -1,4 +1,4 @@ - + Exe diff --git 
a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs index 10154ba9a02..0adc1cb96e1 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs @@ -42,6 +42,16 @@ namespace Microsoft.Quantum.Testing.QIR { let res6 = ArcTan2Test(); let res7 = PauliToStringTest(); let res8 = TestDrawRandomInt(0, 1); + let res9 = SinTest(); + let res10 = CosTest(); + let res11 = TanTest(); + let res12 = SinhTest(); + let res13 = CoshTest(); + let res14 = TanhTest(); + let res15 = IeeeRemainderTest(); + let res16 = ArcSinTest(); + let res17 = ArcCosTest(); + let res18 = ArcTanTest(); MessageTest("Test"); } return sum; diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs index 2ad876a087c..b89d8777f8a 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs @@ -6,6 +6,7 @@ namespace Microsoft.Quantum.Testing.QIR.Math { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Math; // E() open Microsoft.Quantum.Random; + open Microsoft.Quantum.Convert; // DoubleAsString() function SqrtTest() : Int { if 2.0 != Sqrt( 4.0) { return 1; } // The return value indicates which test case has failed. @@ -65,5 +66,295 @@ namespace Microsoft.Quantum.Testing.QIR.Math { return DrawRandomInt(min, max); } -} + function Close(expected : Double, actual : Double) : Bool { + let neighbourhood = 0.0000001; // On x86-64 + Win the error is in 16th digit after the decimal point. + // E.g. instead of 0.0 there can be 0.00000000000000012. + // Thus 0.0000001 should be more than enough. 
+ + return ((expected - neighbourhood) < actual) and (actual < (expected + neighbourhood)); + } + + function SinTest() : Int { + + // function Sin (theta : Double) : Double + + if not Close(0.0, Sin(0.0)) { return 1; } // The return value indicates which test case has failed. + if not Close(0.5, Sin(PI()/6.0)) { return 2; } + if not Close(1.0, Sin(PI()/2.0)) { return 3; } + if not Close(0.5, Sin(5.0*PI()/6.0)) { return 4; } + if not Close(0.0, Sin(PI())) { return 5; } + + if not Close(-0.5, Sin(-5.0*PI()/6.0)) { return 6; } + if not Close(-1.0, Sin(-PI()/2.0)) { return 7; } + if not Close(-0.5, Sin(-PI()/6.0)) { return 8; } + + if not Close(Sqrt(2.0)/2.0, Sin(PI()/4.0)) { return 9; } + + if NAN() != Sin(NAN()) { return 10; } + if NAN() != Sin(INFINITY()) { return 11; } + if NAN() != Sin(-INFINITY()) { return 11; } + + return 0; + } + + function CosTest() : Int { + + // function Cos (theta : Double) : Double + + if not Close( 1.0, Cos(0.0)) { return 1; } // The return value indicates which test case has failed. + if not Close( 0.5, Cos(PI()/3.0)) { return 2; } + if not Close( 0.0, Cos(PI()/2.0)) { return 3; } + if not Close(-0.5, Cos(2.0*PI()/3.0)) { return 4; } + if not Close(-1.0, Cos(PI())) { return 5; } + + if not Close(-0.5, Cos(-2.0*PI()/3.0)) { return 6; } + if not Close( 0.0, Cos(-PI()/2.0)) { return 7; } + if not Close( 0.5, Cos(-PI()/3.0)) { return 8; } + + if not Close(Sqrt(2.0)/2.0, Cos(PI()/4.0)) { return 9; } + + if NAN() != Cos(NAN()) { return 10; } + if NAN() != Cos(INFINITY()) { return 11; } + if NAN() != Cos(-INFINITY()) { return 11; } + + return 0; + } + + function TanTest() : Int { + // function Tan (theta : Double) : Double + if not Close( 0.0, Tan(0.0)) { return 1; } // The return value indicates which test case has failed. 
+ if not Close( 0.5/(Sqrt(3.0)/2.0), Tan( PI()/6.0)) { return 2; } // tg(Pi/6) = sin(Pi/6) / cos(Pi/6) = (1/2) / (sqrt(3)/2) + if not Close( 1.0, Tan( PI()/4.0)) { return 3; } + if not Close( (Sqrt(3.0)/2.0)/0.5, Tan( PI()/3.0)) { return 4; } + // https://en.cppreference.com/w/cpp/numeric/math/tan + // The function has mathematical poles at Pi(1/2 + n); however no common floating-point representation + // is able to represent Pi/2 exactly, thus there is no value of the argument for which a pole error occurs. + if not Close(-1.0, Tan(3.0*PI()/4.0)) { return 5; } + if not Close( 0.0, Tan(PI())) { return 6; } + + if not Close(-0.5/(Sqrt(3.0)/2.0), Tan(-PI()/6.0)) { return 7; } + if not Close(-1.0, Tan(-PI()/4.0)) { return 8; } + if not Close(-(Sqrt(3.0)/2.0)/0.5, Tan(-PI()/3.0)) { return 9; } + if not Close( 1.0, Tan(-3.0*PI()/4.0)) { return 10; } + + if NAN() != Tan(NAN()) { return 11; } + if NAN() != Tan(INFINITY()) { return 12; } + if NAN() != Tan(-INFINITY()) { return 13; } + return 0; + } + + function ArcSinTest() : Int { + + // function ArcSin (theta : Double) : Double + + if not Close(0.0, ArcSin(0.0)) { return 1; } // The return value indicates which test case has failed. + + if not Close(PI()/6.0, ArcSin(0.5)) { return 2; } + if not Close(PI()/2.0, ArcSin(1.0)) { return 3; } + + if not Close(-PI()/6.0, ArcSin(-0.5)) { return 4; } + if not Close(-PI()/2.0, ArcSin(-1.0)) { return 5; } + + if not Close(PI()/4.0, ArcSin(Sqrt(2.0)/2.0)) { return 6; } + + if NAN() != ArcSin(NAN()) { return 7; } + if NAN() != ArcSin(1.1) { return 8; } + if NAN() != ArcSin(-1.1) { return 9; } + + mutable testVal = -1.0; + while testVal <= 1.0 { + if not Close(testVal, Sin(ArcSin(testVal))) { return 10; } + set testVal = testVal + 0.1; + } + + return 0; + } + + function ArcCosTest() : Int { + + // function ArcCos (theta : Double) : Double + + if not Close( 0.0, ArcCos(1.0)) { return 1; } // The return value indicates which test case has failed. 
+ if not Close( PI()/3.0, ArcCos(0.5)) { return 2; } + if not Close( PI()/2.0, ArcCos(0.0)) { return 3; } + if not Close(2.0*PI()/3.0, ArcCos(-0.5)) { return 4; } + if not Close(PI(), ArcCos(-1.0)) { return 5; } + + if not Close(PI()/4.0, ArcCos(Sqrt(2.0)/2.0)) { return 6; } + + if NAN() != ArcCos(NAN()) { return 7; } + if NAN() != ArcCos(1.1) { return 8; } + if NAN() != ArcCos(-1.1) { return 9; } + + mutable testVal = -1.0; + while testVal <= 1.0 { + if not Close(testVal, Cos(ArcCos(testVal))) { return 10; } + set testVal = testVal + 0.1; + } + + return 0; + } + + function ArcTanTest() : Int { + + // function ArcTan (theta : Double) : Double + + if not Close( 0.0, ArcTan(0.0)) { return 1; } // The return value indicates which test case has failed. + if not Close( PI()/6.0, ArcTan(1.0/Sqrt(3.0))) { return 2; } // tg(Pi/6) = sin(Pi/6) / cos(Pi/6) = (1/2) / (sqrt(3)/2) = 1/sqrt(3) + if not Close( PI()/4.0, ArcTan(1.0)) { return 3; } + if not Close( PI()/3.0, ArcTan(Sqrt(3.0))) { return 4; } + + if not Close(-PI()/6.0, ArcTan(-1.0/Sqrt(3.0))) { return 5; } + if not Close(-PI()/4.0, ArcTan(-1.0)) { return 6; } + if not Close(-PI()/3.0, ArcTan(-Sqrt(3.0))) { return 7; } + + if NAN() != ArcTan(NAN()) { return 8; } + if not Close( PI()/2.0, ArcTan( INFINITY())) { return 9; } + if not Close(-PI()/2.0, ArcTan(-INFINITY())) { return 10; } + + mutable testVal = -10.0; + while testVal <= 10.0 { + if not Close(testVal, Tan(ArcTan(testVal))) { return 11; } + set testVal = testVal + 0.1; + } + + return 0; + } + + function SinhTest() : Int { + + // function Sinh (theta : Double) : Double + + let xValues = [ -5.0, -4.5, -4.0, -3.5, -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, + 5.0, 4.5, 4.0, 3.5, 3.0, 2.5, 2.0, 1.5, 1.0, 0.5, 0.0 ]; + for x in xValues { + if not Close( (ExpD(x) - ExpD(-x)) / 2.0, Sinh(x)) { return 1; } // The return value indicates which test case has failed. 
+ } + + if NAN() != Sinh(NAN()) { return 2; } + if INFINITY() != Sinh(INFINITY()) { return 3; } + if -INFINITY() != Sinh(-INFINITY()) { return 4; } + + return 0; + } + + function CoshTest() : Int { + + // function Cosh (theta : Double) : Double + + let xValues = [ -5.0, -4.5, -4.0, -3.5, -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, + 5.0, 4.5, 4.0, 3.5, 3.0, 2.5, 2.0, 1.5, 1.0, 0.5, 0.0 ]; + for x in xValues { + if not Close( (ExpD(x) + ExpD(-x)) / 2.0, Cosh(x)) { return 1; } // The return value indicates which test case has failed. + } + + if NAN() != Cosh(NAN()) { return 2; } + if INFINITY() != Cosh(INFINITY()) { return 3; } + if INFINITY() != Cosh(-INFINITY()) { return 4; } + + return 0; + } + + function TanhTest() : Int { + + // function Tanh (theta : Double) : Double + + let xValues = [ -5.0, -4.5, -4.0, -3.5, -3.0, -2.5, -2.0, -1.5, -1.0, -0.5, + 5.0, 4.5, 4.0, 3.5, 3.0, 2.5, 2.0, 1.5, 1.0, 0.5, 0.0 ]; + for x in xValues { + if not Close( Sinh(x) / Cosh(x), Tanh(x)) { return 1; } // The return value indicates which test case has failed. 
+ } + + if NAN() != Tanh(NAN()) { return 2; } + if 1.0 != Tanh(INFINITY()) { return 3; } + if -1.0 != Tanh(-INFINITY()) { return 4; } + + return 0; + } + + // Remove when the Q# compiler bug https://github.com/microsoft/qsharp-compiler/issues/877 is resolved: + //function MyRound(value : Double) : Int { // 4.x 4.5 5.x 5.5 -4.x -4.5 -5.x -5.5 + // + // // Temporary piece of code to test a Q# compiler bug: + // + // let truncated = Truncate(value); // 4 4 5 5 -4 -4 -5 -5 + // if truncated >= 0 { + // let diff = value - IntAsDouble(truncated); // 0.x 0.5 0.x 0.5 diff + // if diff < 0.5 { return truncated; } // 4 5 return + // if diff > 0.5 { return (truncated + 1); } // 5 6 return + // if truncated % 2 == 0 { return truncated; } // 4 return + // else { return truncated + 1; } // 6 return + // } + // else { + // let diff = IntAsDouble(truncated) - value; // 0.x 0.5 0.x 0.5 diff + // if diff < 0.5 { return truncated; } // -4 -5 + // if diff > 0.5 { return (truncated - 1); } // -5 -6 + // if truncated % 2 == 0 { return truncated; } // -4 + // else { return truncated - 1; } // -6 + // } + // + // // End of temporary piece of code. 
+ // + // + // // Temporary piece of code to work around the clang++ crash upon `Round()` (resolved in `0.15.2102129370-alpha`): + // + // //let truncated = Truncate(value); // 4 4 5 5 -4 -4 -5 -5 + // //if truncated >= 0 { + // // let diff = value - IntAsDouble(truncated); // 0.x 0.5 0.x 0.5 diff + // // if diff < 0.5 { return truncated; } // 4 5 return + // // if diff > 0.5 { return (truncated + 1); } // 5 6 return + // // if truncated % 2 == 0 { return truncated; } // 4 return + // // return truncated + 1; // 6 return + // //} + // //let diff2 = IntAsDouble(truncated) - value; // 0.x 0.5 0.x 0.5 diff + // //if diff2 < 0.5 { return truncated; } // -4 -5 + // //if diff2 > 0.5 { return (truncated - 1); } // -5 -6 + // //if truncated % 2 == 0 { return truncated; } // -4 + // //return truncated - 1; // -6 + // + // // End of temporary piece of code to work around the clang++ crash. + //} + + function IeeeRemainderTest() : Int { + + // function IeeeRemainder(x : Double, y : Double) : Double + + mutable dividend = -10.0; + while dividend <= 10.0 { + + mutable divisor = -20.0; + while divisor < 20.0 { + if divisor != 0.0 { + let absFractionalPart = AbsD(dividend / divisor) - IntAsDouble(AbsI(Truncate(dividend / divisor))); + if not Close(0.5, absFractionalPart) { // Because of the calculation errors the + // fractional part close to 0.5 causes very different result for + // the `remainder` and `IEEERemainder()` calculated below. + // That is normal but we avoid that. + let remainder = dividend - (divisor * IntAsDouble(Round(dividend / divisor))); + //MyRound(dividend / divisor))); // Remove when the https://github.com/microsoft/qsharp-compiler/issues/877 is resolved. + if not Close(remainder, IEEERemainder(dividend, divisor)) { + Message(DoubleAsString(remainder)); // The output for the test failure analysis, + Message(DoubleAsString(dividend)); // if the failure happens. 
+ Message(DoubleAsString(divisor)); + Message(DoubleAsString(IEEERemainder(dividend, divisor))); + return 1; + } + } + } + set divisor = divisor + 0.3; + } + set dividend = dividend + 0.1; + } + + if NAN() != IEEERemainder( INFINITY(), 1.0) { return 2; } + if NAN() != IEEERemainder(-INFINITY(), 1.0) { return 3; } + if NAN() != IEEERemainder(1.0, 0.0) { return 4; } + if NAN() != IEEERemainder(NAN(), 1.0) { return 5; } + if NAN() != IEEERemainder(1.0, NAN()) { return 6; } + if NAN() != IEEERemainder(NAN(), NAN()) { return 7; } + + return 0; + } + +} From d9d08086d5c655afb0ad9ccf9f7c0870589ea7ef Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Wed, 24 Feb 2021 13:50:34 -0800 Subject: [PATCH 12/30] Updating ClassicalControls to reduce intrinsics (#529) This updates the structure of the classical control ApplyIf* and ApplyControlled* callables in terms of each other to reduce 8 intrinsics to 2 intrinsics. Also updates the qir-gen.csproj to use the local QSharp.Core project instead of the one from the QDK for easier testing. 
--- src/QirRuntime/test/QIR-static/qsharp/main.cs | 13 ++++ .../test/QIR-static/qsharp/qir-gen.csproj | 8 ++- .../Microsoft.Quantum.CSharpGeneration.fsproj | 2 +- ....Simulation.QCTraceSimulatorRuntime.csproj | 2 +- .../Microsoft.Quantum.QSharp.Core.csproj | 2 +- .../QSharpFoundation/ClassicalControl.qs | 64 ++++++++++++++----- ...Microsoft.Quantum.QSharp.Foundation.csproj | 3 +- .../HoneywellExe/HoneywellExe.csproj | 2 +- .../IntrinsicTests/IntrinsicTests.csproj | 2 +- .../TestProjects/IonQExe/IonQExe.csproj | 2 +- .../Library with Spaces.csproj | 2 +- .../TestProjects/Library1/Library1.csproj | 2 +- .../TestProjects/Library2/Library2.csproj | 2 +- .../TestProjects/QCIExe/QCIExe.csproj | 2 +- .../TestProjects/QSharpExe/QSharpExe.csproj | 2 +- .../TargetedExe/TargetedExe.csproj | 2 +- .../TestProjects/UnitTests/UnitTests.csproj | 2 +- .../Tests.Microsoft.Quantum.Simulators.csproj | 2 +- ....Microsoft.Quantum.Simulators.Type1.csproj | 2 +- ....Microsoft.Quantum.Simulators.Type2.csproj | 2 +- ....Microsoft.Quantum.Simulators.Type3.csproj | 2 +- .../Microsoft.Quantum.Simulators.csproj | 2 +- .../Microsoft.Quantum.Type1.Core.csproj | 2 +- .../Microsoft.Quantum.Type2.Core.csproj | 2 +- .../Microsoft.Quantum.Type3.Core.csproj | 2 +- 25 files changed, 91 insertions(+), 39 deletions(-) create mode 100644 src/QirRuntime/test/QIR-static/qsharp/main.cs diff --git a/src/QirRuntime/test/QIR-static/qsharp/main.cs b/src/QirRuntime/test/QIR-static/qsharp/main.cs new file mode 100644 index 00000000000..3e0cec7acae --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qsharp/main.cs @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// Currently, compiling to QIR has to suppress C# generation but then we need to provide Main function ourselves. 
+namespace CompilerWorkaround +{ + class Program + { + static void Main(string[] args) + { + } + } +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj index 16a4473fd18..cafd2d3365a 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj @@ -1,9 +1,15 @@ - + Exe netcoreapp3.1 True + false + false + + + + diff --git a/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj b/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj index d387f0b4215..589624b2877 100644 --- a/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj +++ b/src/Simulation/CSharpGeneration/Microsoft.Quantum.CSharpGeneration.fsproj @@ -22,7 +22,7 @@ - + diff --git a/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj b/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj index e6c3aa13131..6d68cbcfd1f 100644 --- a/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj +++ b/src/Simulation/QCTraceSimulator.Tests/Tests.Microsoft.Quantum.Simulation.QCTraceSimulatorRuntime.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj b/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj index bee12356075..ca3c3be6651 100644 --- a/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj +++ b/src/Simulation/QSharpCore/Microsoft.Quantum.QSharp.Core.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/QSharpFoundation/ClassicalControl.qs b/src/Simulation/QSharpFoundation/ClassicalControl.qs index 05ea7392569..38d486e8e70 100644 --- a/src/Simulation/QSharpFoundation/ClassicalControl.qs +++ b/src/Simulation/QSharpFoundation/ClassicalControl.qs @@ -29,20 +29,36 @@ namespace 
Microsoft.Quantum.Simulation.QuantumProcessor.Extensions //ToDo: updat } operation ApplyIfElseIntrinsicA(measurementResult : Result, onResultZeroOp : (Unit => Unit is Adj) , onResultOneOp : (Unit => Unit is Adj)) : Unit is Adj { - body intrinsic; - adjoint intrinsic; + body (...) { + ApplyIfElseIntrinsic(measurementResult, onResultZeroOp, onResultOneOp); + } + adjoint (...) { + ApplyIfElseIntrinsic(measurementResult, Adjoint onResultZeroOp, Adjoint onResultOneOp); + } } operation ApplyIfElseIntrinsicC(measurementResult : Result, onResultZeroOp : (Unit => Unit is Ctl) , onResultOneOp : (Unit => Unit is Ctl)) : Unit is Ctl { - body intrinsic; - controlled intrinsic; + body (...) { + ApplyIfElseIntrinsic(measurementResult, onResultZeroOp, onResultOneOp); + } + controlled (ctls, ...) { + ApplyIfElseIntrinsic(measurementResult, Controlled onResultZeroOp(ctls, _), Controlled onResultOneOp(ctls, _)); + } } operation ApplyIfElseIntrinsicCA(measurementResult : Result, onResultZeroOp : (Unit => Unit is Ctl + Adj) , onResultOneOp : (Unit => Unit is Ctl + Adj)) : Unit is Ctl + Adj { - body intrinsic; - adjoint intrinsic; - controlled intrinsic; - controlled adjoint intrinsic; + body (...) { + ApplyIfElseIntrinsic(measurementResult, onResultZeroOp, onResultOneOp); + } + adjoint (...) { + ApplyIfElseIntrinsic(measurementResult, Adjoint onResultZeroOp, Adjoint onResultOneOp); + } + controlled (ctls, ...) { + ApplyIfElseIntrinsic(measurementResult, Controlled onResultZeroOp(ctls, _), Controlled onResultOneOp(ctls, _)); + } + controlled adjoint (ctls, ...) 
{ + ApplyIfElseIntrinsic(measurementResult, Controlled Adjoint onResultZeroOp(ctls, _), Controlled Adjoint onResultOneOp(ctls, _)); + } } @@ -52,20 +68,36 @@ namespace Microsoft.Quantum.Simulation.QuantumProcessor.Extensions //ToDo: updat } operation ApplyConditionallyIntrinsicA(measurementResults : Result[], resultsValues : Result[], onEqualOp : (Unit => Unit is Adj) , onNonEqualOp : (Unit => Unit is Adj)) : Unit is Adj { - body intrinsic; - adjoint intrinsic; + body (...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, onEqualOp, onNonEqualOp); + } + adjoint (...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, Adjoint onEqualOp, Adjoint onNonEqualOp); + } } operation ApplyConditionallyIntrinsicC(measurementResults : Result[], resultsValues : Result[], onEqualOp : (Unit => Unit is Ctl) , onNonEqualOp : (Unit => Unit is Ctl)) : Unit is Ctl { - body intrinsic; - controlled intrinsic; + body (...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, onEqualOp, onNonEqualOp); + } + controlled (ctls, ...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, Controlled onEqualOp(ctls, _), Controlled onNonEqualOp(ctls, _)); + } } operation ApplyConditionallyIntrinsicCA(measurementResults : Result[], resultsValues : Result[], onEqualOp : (Unit => Unit is Ctl + Adj) , onNonEqualOp : (Unit => Unit is Ctl + Adj)) : Unit is Ctl + Adj { - body intrinsic; - adjoint intrinsic; - controlled intrinsic; - controlled adjoint intrinsic; + body (...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, onEqualOp, onNonEqualOp); + } + adjoint (...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, Adjoint onEqualOp, Adjoint onNonEqualOp); + } + controlled (ctls, ...) { + ApplyConditionallyIntrinsic(measurementResults, resultsValues, Controlled onEqualOp(ctls, _), Controlled onNonEqualOp(ctls, _)); + } + controlled adjoint (ctls, ...) 
{ + ApplyConditionallyIntrinsic(measurementResults, resultsValues, Controlled Adjoint onEqualOp(ctls, _), Controlled Adjoint onNonEqualOp(ctls, _)); + } } diff --git a/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj b/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj index 3dd5111a716..d06dc33ddc3 100644 --- a/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj +++ b/src/Simulation/QSharpFoundation/Microsoft.Quantum.QSharp.Foundation.csproj @@ -1,4 +1,4 @@ - + @@ -8,6 +8,7 @@ true false false + D diff --git a/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj index dba30bd5d0e..0e85b9642da 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/HoneywellExe/HoneywellExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj b/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj index 76774ef34c7..676d025b48b 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/IntrinsicTests/IntrinsicTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.1 diff --git a/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj index 8dc981e3a13..885b0acb808 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/IonQExe/IonQExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj b/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj index 08b73178a78..8236575e628 
100644 --- a/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/Library with Spaces/Library with Spaces.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 false diff --git a/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj b/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj index 367d19a5ee2..df41722f72e 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/Library1/Library1.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 diff --git a/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj b/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj index 367d19a5ee2..df41722f72e 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/Library2/Library2.csproj @@ -1,4 +1,4 @@ - + netstandard2.1 diff --git a/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj index e918999b811..b4470d362ec 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/QCIExe/QCIExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj index ebcc391078d..c009660dcd5 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/QSharpExe/QSharpExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj b/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj index 0e3dece3b64..d9647d6c65a 100644 --- 
a/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/TargetedExe/TargetedExe.csproj @@ -1,4 +1,4 @@ - + Exe diff --git a/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj b/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj index 744de7e54c1..f9f22b5d19d 100644 --- a/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj +++ b/src/Simulation/Simulators.Tests/TestProjects/UnitTests/UnitTests.csproj @@ -1,4 +1,4 @@ - + netcoreapp3.1 diff --git a/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj b/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj index 945ad953a4f..013448b7f74 100644 --- a/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj +++ b/src/Simulation/Simulators.Tests/Tests.Microsoft.Quantum.Simulators.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj b/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj index ffeea476545..7717b8f1682 100644 --- a/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj +++ b/src/Simulation/Simulators.Type1.Tests/Tests.Microsoft.Quantum.Simulators.Type1.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj b/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj index 0493ce6298f..7f7435faf90 100644 --- a/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj +++ b/src/Simulation/Simulators.Type2.Tests/Tests.Microsoft.Quantum.Simulators.Type2.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj b/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj 
index d3011e0359f..fa4bb43fa21 100644 --- a/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj +++ b/src/Simulation/Simulators.Type3.Tests/Tests.Microsoft.Quantum.Simulators.Type3.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj b/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj index ee170f9087f..bccfd4022a8 100644 --- a/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj +++ b/src/Simulation/Simulators/Microsoft.Quantum.Simulators.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj b/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj index 7daac307ae6..48d17d33cfa 100644 --- a/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj +++ b/src/Simulation/Type1Core/Microsoft.Quantum.Type1.Core.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj b/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj index 2a066c3ae25..0bdc65ac1aa 100644 --- a/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj +++ b/src/Simulation/Type2Core/Microsoft.Quantum.Type2.Core.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj b/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj index 37e8013917b..a0d68f37051 100644 --- a/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj +++ b/src/Simulation/Type3Core/Microsoft.Quantum.Type3.Core.csproj @@ -1,4 +1,4 @@ - + From a61a98eb7265d4ee3f9c1ce97103568a983e23e5 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Wed, 24 Feb 2021 17:44:08 -0800 Subject: [PATCH 13/30] QirRuntime: support ApplyIf* and ApplyConditionally* intrinsics (#527) --- src/QirRuntime/lib/QIR/CMakeLists.txt | 5 +- src/QirRuntime/lib/QIR/bridge-qis.ll | 38 ++++- src/QirRuntime/lib/QIR/conditionals.cpp | 54 ++++++ 
src/QirRuntime/lib/QIR/context.cpp | 2 + src/QirRuntime/lib/QIR/delegated.cpp | 4 +- src/QirRuntime/lib/QIR/quantum__qis.hpp | 31 ++-- src/QirRuntime/lib/QIR/quantum__rt.hpp | 4 + src/QirRuntime/test/QIR-static/CMakeLists.txt | 1 + src/QirRuntime/test/QIR-static/qir-driver.cpp | 27 ++- .../test/QIR-static/qir-test-conditionals.cpp | 160 ++++++++++++++++++ src/QirRuntime/test/QIR-static/qsharp/Math.qs | 3 + .../test/QIR-static/qsharp/qir-gen.csproj | 1 + .../test/QIR-static/qsharp/qir-test-arrays.qs | 8 +- .../qsharp/qir-test-conditionals.qs | 63 +++++++ .../QIR-static/qsharp/qir-test-functors.qs | 46 ++++- 15 files changed, 416 insertions(+), 31 deletions(-) create mode 100644 src/QirRuntime/lib/QIR/conditionals.cpp create mode 100644 src/QirRuntime/test/QIR-static/qir-test-conditionals.cpp create mode 100644 src/QirRuntime/test/QIR-static/qsharp/qir-test-conditionals.qs diff --git a/src/QirRuntime/lib/QIR/CMakeLists.txt b/src/QirRuntime/lib/QIR/CMakeLists.txt index 48d8e755119..16d1634b935 100644 --- a/src/QirRuntime/lib/QIR/CMakeLists.txt +++ b/src/QirRuntime/lib/QIR/CMakeLists.txt @@ -49,8 +49,9 @@ compile_from_qir(bridge-qis ${bridge_qis_target}) # create qir-qis-support lib from the C++ sources # set(qis_sup_source_files - "intrinsics.cpp" - "intrinsicsMath.cpp" + conditionals.cpp + intrinsics.cpp + intrinsicsMath.cpp intrinsicsOut.cpp ) diff --git a/src/QirRuntime/lib/QIR/bridge-qis.ll b/src/QirRuntime/lib/QIR/bridge-qis.ll index 6566d652351..e841bb18dd2 100644 --- a/src/QirRuntime/lib/QIR/bridge-qis.ll +++ b/src/QirRuntime/lib/QIR/bridge-qis.ll @@ -60,7 +60,7 @@ declare void @quantum__qis__y__ctl(%struct.QirArray*, %class.QUBIT*) declare void @quantum__qis__z__body(%class.QUBIT*) declare void @quantum__qis__z__ctl(%struct.QirArray*, %class.QUBIT*) -declare void @quantum__qis__message__body(%"struct.QirString"* %str) +declare void @quantum__qis__message__body(%struct.QirString* %str) 
;=============================================================================== ; quantum.qis namespace implementations @@ -284,8 +284,8 @@ define void @__quantum__qis__z__ctl(%Array* %.ctls, %Qubit* %.q) { ;=============================================================================== ; define void @__quantum__qis__message__body(%String* %.str) { - %str = bitcast %String* %.str to %"struct.QirString"* - call void @quantum__qis__message__body(%"struct.QirString"* %str) + %str = bitcast %String* %.str to %struct.QirString* + call void @quantum__qis__message__body(%struct.QirString* %str) ret void } @@ -437,3 +437,35 @@ define i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) { %result = call i64 @quantum__qis__drawrandomint__body(i64 %min, i64 %max) ret i64 %result } + +;=============================================================================== +; quantum.qis conditional functions +; +declare void @quantum__qis__applyifelseintrinsic__body(%class.RESULT*, %struct.QirCallable*, %struct.QirCallable*) +declare void @quantum__qis__applyconditionallyintrinsic__body( + %struct.QirArray*, %struct.QirArray*, %struct.QirCallable*, %struct.QirCallable*) + +define void @__quantum__qis__applyifelseintrinsic__body( + %Result* %.r, %Callable* %.clb_on_zero, %Callable* %.clb_on_one) { + + %r = bitcast %Result* %.r to %class.RESULT* + %clb_on_zero = bitcast %Callable* %.clb_on_zero to %struct.QirCallable* + %clb_on_one = bitcast %Callable* %.clb_on_one to %struct.QirCallable* + call void @quantum__qis__applyifelseintrinsic__body( + %class.RESULT* %r, %struct.QirCallable* %clb_on_zero, %struct.QirCallable* %clb_on_one) + ret void +} + +define void @__quantum__qis__applyconditionallyintrinsic__body( + %Array* %.rs1, %Array* %.rs2, %Callable* %.clb_on_equal, %Callable* %.clb_on_different) { + + %rs1 = bitcast %Array* %.rs1 to %struct.QirArray* + %rs2 = bitcast %Array* %.rs2 to %struct.QirArray* + %clb_on_equal = bitcast %Callable* %.clb_on_equal to 
%struct.QirCallable* + %clb_on_different = bitcast %Callable* %.clb_on_different to %struct.QirCallable* + call void @quantum__qis__applyconditionallyintrinsic__body( + %struct.QirArray* %rs1, %struct.QirArray* %rs2, + %struct.QirCallable* %clb_on_equal, %struct.QirCallable* %clb_on_different) + ret void +} + diff --git a/src/QirRuntime/lib/QIR/conditionals.cpp b/src/QirRuntime/lib/QIR/conditionals.cpp new file mode 100644 index 00000000000..72c46e21db6 --- /dev/null +++ b/src/QirRuntime/lib/QIR/conditionals.cpp @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include +#include + +#include "quantum__qis.hpp" + +#include "QirTypes.hpp" +#include "quantum__rt.hpp" + +static void Apply(QirCallable* clb) +{ + PTuple argsTuple = quantum__rt__tuple_create(0); + quantum__rt__callable_invoke(clb, argsTuple /*args*/, nullptr /*result*/); + quantum__rt__tuple_update_reference_count(argsTuple, -1); +} + +static bool ArraysContainEqualResults(QirArray* rs1, QirArray* rs2) +{ + assert(rs1 != nullptr && rs2 != nullptr && rs1->count == rs2->count); + assert(rs1->itemSizeInBytes == sizeof(void*)); // the array should contain pointers to RESULT + assert(rs2->itemSizeInBytes == sizeof(void*)); // the array should contain pointers to RESULT + + RESULT** results1 = reinterpret_cast(rs1->buffer); + RESULT** results2 = reinterpret_cast(rs2->buffer); + for (int64_t i = 0; i < rs1->count; i++) + { + if (!quantum__rt__result_equal(results1[i], results2[i])) + { + return false; + } + } + return true; +} + +extern "C" +{ + void quantum__qis__applyifelseintrinsic__body(RESULT* r, QirCallable* clbOnZero, QirCallable* clbOnOne) + { + QirCallable* clbApply = quantum__rt__result_equal(r, quantum__rt__result_zero()) ? 
clbOnZero : clbOnOne; + Apply(clbApply); + } + + void quantum__qis__applyconditionallyintrinsic__body( + QirArray* rs1, + QirArray* rs2, + QirCallable* clbOnAllEqual, + QirCallable* clbOnSomeDifferent) + { + QirCallable* clbApply = ArraysContainEqualResults(rs1, rs2) ? clbOnAllEqual : clbOnSomeDifferent; + Apply(clbApply); + } +} \ No newline at end of file diff --git a/src/QirRuntime/lib/QIR/context.cpp b/src/QirRuntime/lib/QIR/context.cpp index 5456b112fa6..11c7fcdcc38 100644 --- a/src/QirRuntime/lib/QIR/context.cpp +++ b/src/QirRuntime/lib/QIR/context.cpp @@ -15,8 +15,10 @@ #define QIR_SHARED_API #endif +// These two globals are used in QIR _directly_ so have to define them outside of the context. extern "C" QIR_SHARED_API Result ResultOne = nullptr; extern "C" QIR_SHARED_API Result ResultZero = nullptr; + namespace Microsoft { namespace Quantum diff --git a/src/QirRuntime/lib/QIR/delegated.cpp b/src/QirRuntime/lib/QIR/delegated.cpp index 129e6a615ee..6530297b227 100644 --- a/src/QirRuntime/lib/QIR/delegated.cpp +++ b/src/QirRuntime/lib/QIR/delegated.cpp @@ -27,12 +27,12 @@ std::unordered_map& AllocatedResults() extern "C" { - Result UseZero() + Result quantum__rt__result_zero() { return Microsoft::Quantum::g_context->simulator->UseZero(); } - Result UseOne() + Result quantum__rt__result_one() { return Microsoft::Quantum::g_context->simulator->UseOne(); } diff --git a/src/QirRuntime/lib/QIR/quantum__qis.hpp b/src/QirRuntime/lib/QIR/quantum__qis.hpp index e22e1937f96..c7f5a108ea8 100644 --- a/src/QirRuntime/lib/QIR/quantum__qis.hpp +++ b/src/QirRuntime/lib/QIR/quantum__qis.hpp @@ -60,20 +60,27 @@ extern "C" QIR_SHARED_API void quantum__qis__z__body(QUBIT*); // NOLINT QIR_SHARED_API void quantum__qis__z__ctl(QirArray*, QUBIT*); // NOLINT - QIR_SHARED_API void quantum__qis__message__body(QirString* qstr); // NOLINT + QIR_SHARED_API void quantum__qis__message__body(QirString* qstr); // NOLINT // Q# Math: - QIR_SHARED_API bool quantum__qis__isnan__body(double d); // 
NOLINT - QIR_SHARED_API double quantum__qis__infinity__body(); // NOLINT - QIR_SHARED_API bool quantum__qis__isinf__body(double d); // NOLINT - QIR_SHARED_API double quantum__qis__arctan2__body(double y, double x); // NOLINT - QIR_SHARED_API double quantum__qis__sinh__body(double theta); // NOLINT - QIR_SHARED_API double quantum__qis__cosh__body(double theta); // NOLINT - QIR_SHARED_API double quantum__qis__arcsin__body(double theta); // NOLINT - QIR_SHARED_API double quantum__qis__arccos__body(double theta); // NOLINT - QIR_SHARED_API double quantum__qis__arctan__body(double theta); // NOLINT + QIR_SHARED_API bool quantum__qis__isnan__body(double d); // NOLINT + QIR_SHARED_API double quantum__qis__infinity__body(); // NOLINT + QIR_SHARED_API bool quantum__qis__isinf__body(double d); // NOLINT + QIR_SHARED_API double quantum__qis__arctan2__body(double y, double x); // NOLINT + QIR_SHARED_API double quantum__qis__sinh__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__cosh__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__arcsin__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__arccos__body(double theta); // NOLINT + QIR_SHARED_API double quantum__qis__arctan__body(double theta); // NOLINT - QIR_SHARED_API double quantum__qis__ieeeremainder__body(double x, double y); // NOLINT - QIR_SHARED_API int64_t quantum__qis__drawrandomint__body(int64_t minimum, int64_t maximum); // NOLINT + QIR_SHARED_API double quantum__qis__ieeeremainder__body(double x, double y); // NOLINT + QIR_SHARED_API int64_t quantum__qis__drawrandomint__body(int64_t minimum, int64_t maximum); // NOLINT + // Q# ApplyIf: + QIR_SHARED_API void quantum__qis__applyifelseintrinsic__body(RESULT*, QirCallable*, QirCallable*); // NOLINT + QIR_SHARED_API void quantum__qis__applyconditionallyintrinsic__body( // NOLINT + QirArray*, + QirArray*, + QirCallable*, + QirCallable*); } \ No newline at end of file diff --git a/src/QirRuntime/lib/QIR/quantum__rt.hpp 
b/src/QirRuntime/lib/QIR/quantum__rt.hpp index f0d24001f19..de3c64876a3 100644 --- a/src/QirRuntime/lib/QIR/quantum__rt.hpp +++ b/src/QirRuntime/lib/QIR/quantum__rt.hpp @@ -84,6 +84,10 @@ extern "C" // becomes 0. The behavior is undefined if the reference count becomes negative. QIR_SHARED_API void quantum__rt__result_update_reference_count(RESULT*, int32_t); // NOLINT + // Not in the QIR spec right now + QIR_SHARED_API RESULT* quantum__rt__result_one(); // NOLINT + QIR_SHARED_API RESULT* quantum__rt__result_zero(); // NOLINT + // ------------------------------------------------------------------------ // Tuples // ------------------------------------------------------------------------ diff --git a/src/QirRuntime/test/QIR-static/CMakeLists.txt b/src/QirRuntime/test/QIR-static/CMakeLists.txt index a9754805783..9fc202a931a 100644 --- a/src/QirRuntime/test/QIR-static/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-static/CMakeLists.txt @@ -17,6 +17,7 @@ add_custom_target(qir_static_test_lib DEPENDS ${QIR_TESTS_LIBS}) # add_executable(qir-static-tests qir-driver.cpp + qir-test-conditionals.cpp qir-test-math.cpp qir-test-strings.cpp qir-test-ouput.cpp diff --git a/src/QirRuntime/test/QIR-static/qir-driver.cpp b/src/QirRuntime/test/QIR-static/qir-driver.cpp index 3015db659bb..c9157c42fcf 100644 --- a/src/QirRuntime/test/QIR-static/qir-driver.cpp +++ b/src/QirRuntime/test/QIR-static/qir-driver.cpp @@ -1,19 +1,19 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-#include #include +#include #include #include #include #include #include "CoreTypes.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" #include "QuantumApi_I.hpp" #include "SimFactory.hpp" #include "SimulatorStub.hpp" -#include "QirContext.hpp" -#include "QirTypes.hpp" #include "quantum__rt.hpp" #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file @@ -223,8 +223,8 @@ TEST_CASE("QIR: Partial application of a callable", "[qir][qir.partCallable]") } #endif -// The Microsoft__Quantum__Testing__QIR__TestControlled__body tests needs proper semantics of X and M, and nothing else. -// The validation is done inside the test and it would return an error code in case of failure. +// The Microsoft__Quantum__Testing__QIR__TestFunctors__body tests needs proper semantics of X and M, and nothing else. +// The validation is done inside the test and it would throw in case of failure. struct FunctorsTestSimulator : public Microsoft::Quantum::SimulatorStub { std::vector qubits; @@ -299,13 +299,18 @@ struct FunctorsTestSimulator : public Microsoft::Quantum::SimulatorStub } }; FunctorsTestSimulator* g_ctrqapi = nullptr; -extern "C" void Microsoft__Quantum__Testing__QIR__TestControlled__body(); // NOLINT -extern "C" void __quantum__qis__k__body(Qubit q) // NOLINT +static int g_cKCalls = 0; +static int g_cKCallsControlled = 0; +extern "C" void Microsoft__Quantum__Testing__QIR__TestFunctors__body(); // NOLINT +extern "C" void Microsoft__Quantum__Testing__QIR__TestFunctorsNoArgs__body(); // NOLINT +extern "C" void __quantum__qis__k__body(Qubit q) // NOLINT { + g_cKCalls++; g_ctrqapi->X(q); } extern "C" void __quantum__qis__k__ctl(QirArray* controls, Qubit q) // NOLINT { + g_cKCallsControlled++; g_ctrqapi->ControlledX(controls->count, reinterpret_cast(controls->buffer), q); } TEST_CASE("QIR: application of nested controlled functor", "[qir][qir.functor]") @@ -314,7 +319,13 @@ TEST_CASE("QIR: application of nested controlled functor", 
"[qir][qir.functor]") QirContextScope qirctx(qapi.get(), true /*trackAllocatedObjects*/); g_ctrqapi = qapi.get(); - CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestControlled__body()); + CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestFunctors__body()); + + const int cKCalls = g_cKCalls; + const int cKCallsControlled = g_cKCallsControlled; + CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestFunctorsNoArgs__body()); + CHECK(g_cKCalls - cKCalls == 3); + CHECK(g_cKCallsControlled - cKCallsControlled == 5); g_ctrqapi = nullptr; } diff --git a/src/QirRuntime/test/QIR-static/qir-test-conditionals.cpp b/src/QirRuntime/test/QIR-static/qir-test-conditionals.cpp new file mode 100644 index 00000000000..556f63d12d6 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-conditionals.cpp @@ -0,0 +1,160 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#include +#include +#include +#include + +#include "catch.hpp" + +#include "CoreTypes.hpp" +#include "QirContext.hpp" +#include "QirTypes.hpp" +#include "QuantumApi_I.hpp" +#include "SimulatorStub.hpp" + +using namespace std; +using namespace Microsoft::Quantum; + +// TestConditionalOnResult() is authored in a way that the expected path through the function only applies X operator +// for the chosen sequence of measurement results, and all other paths apply Y. Thus, the correct execution must get the +// expected maximum number of X and ControlledX callbacks. 
+struct ConditionalsTestSimulator : public Microsoft::Quantum::SimulatorStub +{ + int nGateCallback = 0; + vector xCallbacks; + vector cxCallbacks; + vector otherCallbacks; + + vector mockMeasurements; + int nextMeasureResult = 0; + + explicit ConditionalsTestSimulator(vector&& results) + : mockMeasurements(results) + { + } + + std::string GetHistory() + { + std::stringstream out; + out << "X: "; + for (int i : this->xCallbacks) + { + out << i << ","; + } + + out << std::endl << "CX: "; + for (int i : this->cxCallbacks) + { + out << i << ","; + } + + out << std::endl << "Other: "; + for (int i : this->otherCallbacks) + { + out << i << ","; + } + return out.str(); + } + + Qubit AllocateQubit() override + { + return nullptr; + } + void ReleaseQubit(Qubit qubit) override {} + + void X(Qubit) override + { + this->xCallbacks.push_back(this->nGateCallback); + this->nGateCallback++; + } + void ControlledX(long numControls, Qubit controls[], Qubit qubit) override + { + this->cxCallbacks.push_back(this->nGateCallback); + this->nGateCallback++; + } + void Y(Qubit) override + { + this->otherCallbacks.push_back(this->nGateCallback); + this->nGateCallback++; + } + void ControlledY(long numControls, Qubit controls[], Qubit qubit) override + { + this->otherCallbacks.push_back(this->nGateCallback); + this->nGateCallback++; + } + + Result Measure(long numBases, PauliId bases[], long numTargets, Qubit targets[]) override + { + assert( + this->nextMeasureResult < this->mockMeasurements.size() && + "ConditionalsTestSimulator isn't set up correctly"); + + Result r = (this->mockMeasurements[this->nextMeasureResult] == Result_Zero) ? 
UseZero() : UseOne(); + this->nextMeasureResult++; + return r; + } + + bool AreEqualResults(Result r1, Result r2) override + { + // those are bogus pointers but it's ok to compare them _as pointers_ + return (r1 == r2); + } + + void ReleaseResult(Result result) override {} // the results aren't allocated by this test simulator + + Result UseZero() override + { + return reinterpret_cast(0); + } + + Result UseOne() override + { + return reinterpret_cast(1); + } +}; + +extern "C" void Microsoft__Quantum__Testing__QIR__TestApplyIf__body(); // NOLINT +TEST_CASE("QIR: ApplyIf", "[qir][qir.conditionals]") +{ + unique_ptr qapi = + make_unique(vector{Result_Zero, Result_One}); + QirContextScope qirctx(qapi.get(), true /*trackAllocatedObjects*/); + + CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestApplyIf__body()); + + INFO(qapi->GetHistory()); + CHECK(qapi->xCallbacks.size() == 8); + CHECK(qapi->cxCallbacks.size() == 0); + CHECK(qapi->otherCallbacks.size() == 0); +} + +extern "C" void Microsoft__Quantum__Testing__QIR__TestApplyIfWithFunctors__body(); // NOLINT +TEST_CASE("QIR: ApplyIf with functors", "[qir][qir.conditionals]") +{ + unique_ptr qapi = + make_unique(vector{Result_Zero, Result_One}); + QirContextScope qirctx(qapi.get(), true /*trackAllocatedObjects*/); + + CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestApplyIfWithFunctors__body()); + + INFO(qapi->GetHistory()); + CHECK(qapi->xCallbacks.size() == 5); + CHECK(qapi->cxCallbacks.size() == 7); + CHECK(qapi->otherCallbacks.size() == 0); +} + +extern "C" void Microsoft__Quantum__Testing__QIR__TestApplyConditionally__body(); // NOLINT +TEST_CASE("QIR: ApplyConditionally", "[qir][qir.conditionals]") +{ + unique_ptr qapi = + make_unique(vector{Result_Zero, Result_One}); + QirContextScope qirctx(qapi.get(), true /*trackAllocatedObjects*/); + + CHECK_NOTHROW(Microsoft__Quantum__Testing__QIR__TestApplyConditionally__body()); + + INFO(qapi->GetHistory()); + CHECK(qapi->xCallbacks.size() == 4); + 
CHECK(qapi->cxCallbacks.size() == 2); + CHECK(qapi->otherCallbacks.size() == 0); +} diff --git a/src/QirRuntime/test/QIR-static/qsharp/Math.qs b/src/QirRuntime/test/QIR-static/qsharp/Math.qs index 23d81bcbe09..6edba1a7860 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/Math.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/Math.qs @@ -1,3 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + namespace Microsoft.Quantum.Intrinsic { open Microsoft.Quantum.Targeting; diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj index cafd2d3365a..dc0df356f9f 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj @@ -3,6 +3,7 @@ Exe netcoreapp3.1 + false True false false diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs index 0adc1cb96e1..58ced577cd5 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs @@ -32,7 +32,8 @@ namespace Microsoft.Quantum.Testing.QIR { // The purpose of this block is to keep the Q# compiler from optimizing away other tests when generating QIR if (compilerDecoy) { - let res1 = TestControlled(); + let res1_1 = TestFunctors(); + let res1_2 = TestFunctorsNoArgs(); let res2 = TestPartials(17, 42); TestQubitResultManagement(); @@ -53,6 +54,11 @@ namespace Microsoft.Quantum.Testing.QIR { let res17 = ArcCosTest(); let res18 = ArcTanTest(); MessageTest("Test"); + + // Conditionals: + TestApplyIf(); + TestApplyIfWithFunctors(); + TestApplyConditionally(); } return sum; } diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-conditionals.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-conditionals.qs new file mode 100644 index 00000000000..28a1b755a64 --- /dev/null +++ 
b/src/QirRuntime/test/QIR-static/qsharp/qir-test-conditionals.qs @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +namespace Microsoft.Quantum.Testing.QIR { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Simulation.QuantumProcessor.Extensions; + + operation TestApplyIf() : Unit { + use q1 = Qubit(); + use q2 = Qubit(); + + let r1 = M(q1); // expected: r1 = Zero + X(q2); + let r2 = M(q2); // expected: r2 = One + + ApplyIfElseR(r1, (X, q1), (Y, q1)); + ApplyIfElseR(r2, (Y, q1), (X, q1)); + + // Other variants + ApplyIfElseRA(r1, (X, q1), (Y, q1)); + ApplyIfElseRC(r1, (X, q1), (Y, q1)); + ApplyIfElseRCA(r1, (X, q1), (Y, q1)); + ApplyIfOne(r2, (X, q1)); + ApplyIfZero(r1, (X, q1)); + } + + operation TestApplyIfWithFunctors() : Unit { + use q1 = Qubit(); + use q2 = Qubit(); + + let r1 = M(q1); + X(q2); + let r2 = M(q2); + + Adjoint ApplyIfElseRCA(r1, (X, q1), (Y, q1)); + Controlled ApplyIfElseRCA([q2], (r1, (X, q1), (Y, q1))); + Adjoint Controlled ApplyIfElseRCA([q2], (r1, (X, q1), (Y, q1))); + Adjoint ApplyIfElseRA(r1, (X, q1), (Y, q1)); + Controlled ApplyIfElseRC([q2], (r1, (X, q1), (Y, q1))); + Adjoint ApplyIfOneA(r2, (X, q1)); + Controlled ApplyIfOneC([q2], (r2, (X, q1))); + Adjoint Controlled ApplyIfOneCA([q2], (r2, (X, q1))); + Adjoint ApplyIfZeroA(r1, (X, q1)); + Controlled ApplyIfZeroC([q2], (r1, (X, q1))); + Adjoint Controlled ApplyIfZeroCA([q2], (r1, (X, q1))); + } + + operation TestApplyConditionally() : Unit { + use q1 = Qubit(); + use q2 = Qubit(); + + let r1 = M(q1); + X(q2); + let r2 = M(q2); + + ApplyConditionally([r1], [r2], (Y, q1), (X, q1)); + ApplyConditionally([r1, One], [Zero, r2], (X, q1), (Y, q1)); + + Adjoint ApplyConditionallyA([r1], [r2], (Y, q1), (X, q1)); + Controlled ApplyConditionallyC([q2], ([r1], [r2], (Y, q1), (X, q1))); + Adjoint Controlled ApplyConditionallyCA([q2], ([r1], [r2], (Y, q1), (X, q1))); + } + +} \ No newline at end of file diff --git 
a/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs index aa6a91f200b..0d8c283dc87 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-functors.qs @@ -1,3 +1,5 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. // For the test to pass implement K to be the same as X. We need it for the test, because the standard bridge doesn't // support multi-controlled X. @@ -18,11 +20,11 @@ namespace Microsoft.Quantum.Testing.QIR { operation Qop(q : Qubit, n : Int) : Unit is Adj+Ctl { body (...) { - if (n%2 == 1) { K(q); } + if n%2 == 1 { K(q); } } adjoint self; controlled (ctrls, ...) { - if (n%2 == 1) { Controlled K(ctrls, q); } + if n%2 == 1 { Controlled K(ctrls, q); } } } @@ -30,7 +32,7 @@ namespace Microsoft.Quantum.Testing.QIR { // BasicMeasurementFeedback, which in turn doesn't allow updating mutables inside measurement conditionals. // this means, we cannot easily get detailed failure information back from Q#, but the test driver can mock // the simulator to track the point of failure. - operation TestControlled() : Unit { + operation TestFunctors() : Unit { let qop = Qop(_, 1); let adj_qop = Adjoint qop; let ctl_qop = Controlled qop; @@ -63,4 +65,42 @@ namespace Microsoft.Quantum.Testing.QIR { } } } + + // The operation is not sensical but in tests we can mock K operator to check that it actually executes + operation NoArgs() : Unit + is Adj+Ctl { + body (...) { + use q = Qubit(); + K(q); + } + adjoint self; + controlled (ctrls, ...) 
{ + use q = Qubit(); + Controlled K(ctrls, q); + } + } + + operation TestFunctorsNoArgs() : Unit { + NoArgs(); + let qop = NoArgs; + let adj_qop = Adjoint qop; + let ctl_qop = Controlled qop; + let adj_ctl_qop = Adjoint Controlled qop; + let ctl_ctl_qop = Controlled ctl_qop; + + use (q1, q2, q3) = (Qubit(), Qubit(), Qubit()) { + X(q1); + X(q2); + X(q3); + + qop(); + adj_qop(); + ctl_qop([q1], ()); + adj_ctl_qop([q1], ()); + ctl_ctl_qop([q1], ([q2], ())); + + Controlled qop([q1, q2], ()); + Adjoint Controlled ctl_ctl_qop([q1], ([q2], ([q3], ()))); + } + } } \ No newline at end of file From cd360b3fad5477930122a7402f2ae473665dc505 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Thu, 25 Feb 2021 11:59:48 -0800 Subject: [PATCH 14/30] Tracer: Fix the target.qs file and link in QIS lib (#533) --- src/QirRuntime/test/QIR-tracer/CMakeLists.txt | 2 + src/QirRuntime/test/QIR-tracer/generate.py | 7 +- src/QirRuntime/test/QIR-tracer/tracer-qir.ll | 2175 +++++++++-------- .../test/QIR-tracer/tracer-target.qs | 21 +- 4 files changed, 1145 insertions(+), 1060 deletions(-) diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt index e48b4c619f7..8f1835140a9 100644 --- a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt @@ -13,8 +13,10 @@ target_link_libraries(qir-tracer-tests PUBLIC ${QIR_UTILITY_LIB} # set by compile_from_qir ${QIR_BRIDGE_UTILITY_LIB} ${QIR_BRIDGE_TRACER_UTILITY_LIB} + ${QIR_BRIDGE_QIS_UTILITY_LIB} tracer qir-rt-support + qir-qis-support ) target_include_directories(qir-tracer-tests PUBLIC diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py index 206afb6b6d6..473535416f9 100644 --- a/src/QirRuntime/test/QIR-tracer/generate.py +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -35,7 +35,12 @@ def log(message): # Compile as a lib so all functions are retained and 
don't have to workaround the current limitations of # @EntryPoint attribute. - command = (qsc + " build --qir s --input " + files_to_process + " --proj " + output_file) + command = (qsc + " build --qir qir --input " + files_to_process + " --proj " + output_file) log("Executing: " + command) subprocess.run(command, shell = True) + # copy the generated file into tracer's input files + generated_file = os.path.join(root_dir, "qir", output_file) + ".ll" + build_input_file = os.path.join(root_dir, output_file) + ".ll" + shutil.copyfile(generated_file, build_input_file) + diff --git a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll index 73d9cf34372..3da1a0f7fa6 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -1,9 +1,13 @@ +;This file was generated using: +;commit 722ec70a97b65f8d3ee1085368142a91183969db (HEAD, origin/swernli/standalone-llvm-2) +;Author: Stefan J. Wernli +;Date: Wed Feb 24 21:51:15 2021 -0800 %Result = type opaque %Range = type { i64, i64, i64 } -%Tuple = type opaque -%Qubit = type opaque %Array = type opaque +%Qubit = type opaque +%Tuple = type opaque %String = type opaque @ResultZero = external global %Result* @@ -14,161 +18,830 @@ @PauliZ = constant i2 -2 @EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { -entry: - ret %Tuple* null -} - -define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { -entry: - ret %Tuple* null -} - -define %Tuple* @Microsoft__Quantum__Core__Inline__body() { +define void @Microsoft__Quantum__Testing__Tracer__Fixup__body(%Array* %qs) { entry: - ret %Tuple* null -} + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qs) + %1 = sub i64 %0, 1 + br label %header__1 -define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { -entry: - 
%ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %1 = bitcast i8* %0 to %Qubit** - store %Qubit* %control, %Qubit** %1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - br i1 true, label %then0__1, label %else__1 +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] + %2 = icmp sle i64 %i, %1 + br i1 %2, label %body__1, label %exit__1 -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) - br label %continue__1 +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %i) + %4 = bitcast i8* %3 to %Qubit** + %qb = load %Qubit*, %Qubit** %4 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + br label %exiting__1 -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) - br label %continue__1 +exiting__1: ; preds = %body__1 + %5 = add i64 %i, 1 + br label %header__1 -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) ret void } -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - declare void @__quantum__rt__array_update_alias_count(%Array*, i64) -declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) +declare i64 @__quantum__rt__array_get_size_1d(%Array*) -declare void @__quantum__rt__array_update_reference_count(%Array*, i64) +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) -define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* 
%control, %Qubit* %target) { +declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { entry: - %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %1 = bitcast i8* %0 to %Qubit** - store %Qubit* %control, %Qubit** %1 + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qb__1 = load %Qubit*, %Qubit** %3 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %5 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %7 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %15 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) + 
%16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %17 = bitcast i8* %16 to %Qubit** + %qb__5 = load %Qubit*, %Qubit** %17 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %19 = bitcast i8* %18 to %Qubit** + %qb__6 = load %Qubit*, %Qubit** %19 + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %qb__7 = load %Qubit*, %Qubit** %21 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %23 = bitcast i8* %22 to %Qubit** + %qb__9 = load %Qubit*, %Qubit** %23 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) + call void @__quantum__qis__inject_barrier(i64 42, i64 0) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %25 = bitcast i8* %24 to %Qubit** + %qb__11 = load %Qubit*, %Qubit** %25 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %qb__13 = load %Qubit*, %Qubit** %27 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__13) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %qb__15 = load %Qubit*, %Qubit** %29 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__15) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %31 = bitcast i8* %30 to %Qubit** + %qb__17 = load %Qubit*, %Qubit** %31 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__17) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %33 = bitcast i8* %32 to %Qubit** 
+ %34 = load %Qubit*, %Qubit** %33 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %39 = bitcast i8* %38 to %Qubit** + %qb__19 = load %Qubit*, %Qubit** %39 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__19) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %41 = bitcast i8* %40 to %Qubit** + %qb__20 = load %Qubit*, %Qubit** %41 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__20) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %43 = bitcast i8* %42 to %Qubit** + %qb__21 = load %Qubit*, %Qubit** %43 + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb__21) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %45 = bitcast i8* %44 to %Qubit** + %qb__22 = load %Qubit*, %Qubit** %45 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__22) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %47 = bitcast i8* %46 to %Qubit** + %qb__24 = load %Qubit*, %Qubit** %47 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__24) + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %49 = bitcast i8* %48 to %Qubit** + store %Qubit* %c, %Qubit** %49 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %51 = bitcast i8* %50 to %Qubit** + %qb__26 = load %Qubit*, %Qubit** %51 br i1 true, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void 
@__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb__26) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__26) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) - ret void -} - -declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { -entry: - %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 -} - -declare %Result* @__quantum__qis__single_qubit_measure(i64, 
i64, %Qubit*) - -define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = load %Result*, %Result** @ResultOne - %res = alloca %Result* - store %Result* %0, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) - %haveY = alloca i1 - store i1 false, i1* %haveY - %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) - %2 = sub i64 %1, 1 - br label %header__1 + %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) + %53 = bitcast i8* %52 to %Qubit** + store %Qubit* %c, %Qubit** %53 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %55 = bitcast i8* %54 to %Qubit** + %qb__27 = load %Qubit*, %Qubit** %55 + br i1 true, label %then0__2, label %else__2 -header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] - %3 = icmp sle i64 %i, %2 - br i1 %3, label %body__1, label %exit__1 +then0__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__27) + br label %continue__2 -body__1: ; preds = %header__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %5 = bitcast i8* %4 to i2* - %6 = load i2, i2* %5 - %7 = load i2, i2* @PauliY - %8 = icmp eq i2 %6, %7 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %10 = bitcast i8* %9 to i2* - %11 = load i2, i2* %10 - %12 = load i2, i2* @PauliI - %13 = icmp eq i2 %11, %12 - %14 = or i1 %8, %13 - br i1 %14, label %then0__1, label %continue__1 +else__2: ; preds = %continue__1 + call void 
@__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__27) + br label %continue__2 -then0__1: ; preds = %body__1 - store i1 true, i1* %haveY - br label %continue__1 +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) + %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) + %57 = bitcast i8* %56 to %Qubit** + store %Qubit* %c, %Qubit** %57 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %59 = bitcast i8* %58 to %Qubit** + %qb__28 = load %Qubit*, %Qubit** %59 + br i1 true, label %then0__3, label %else__3 -continue__1: ; preds = %then0__1, %body__1 - br label %exiting__1 +then0__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__28) + br label %continue__3 -exiting__1: ; preds = %continue__1 - %15 = add i64 %i, 1 - br label %header__1 +else__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__28) + br label %continue__3 -exit__1: ; preds = %header__1 - %16 = load i1, i1* %haveY +continue__3: ; preds = %else__3, %then0__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) + %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) + %61 = bitcast i8* %60 to %Qubit** + store %Qubit* %c, %Qubit** %61 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 
1) + %63 = bitcast i8* %62 to %Qubit** + %qb__29 = load %Qubit*, %Qubit** %63 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) + %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %c, %Qubit** %65 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %67 = bitcast i8* %66 to %Qubit** + %qb__30 = load %Qubit*, %Qubit** %67 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__30) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) + %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) + %69 = bitcast i8* %68 to %Qubit** + store %Qubit* %c, %Qubit** %69 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %71 = bitcast i8* %70 to %Qubit** + %qb__31 = load %Qubit*, %Qubit** %71 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__31) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) + %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %c, %Qubit** %73 + 
call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %75 = bitcast i8* %74 to %Qubit** + %qb__32 = load %Qubit*, %Qubit** %75 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__6, %Qubit* %qb__32) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) + %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) + %77 = bitcast i8* %76 to %Qubit** + store %Qubit* %c, %Qubit** %77 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %79 = bitcast i8* %78 to %Qubit** + %qb__33 = load %Qubit*, %Qubit** %79 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__33) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) + %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) + %81 = bitcast i8* %80 to %Qubit** + store %Qubit* %c, %Qubit** %81 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %83 = bitcast i8* %82 to %Qubit** + %qb__35 = load %Qubit*, %Qubit** %83 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__35) + 
call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__qubit_release(%Qubit* %c) + %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %85 = bitcast i8* %84 to %Qubit** + %qb__37 = load %Qubit*, %Qubit** %85 + %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) + %87 = icmp eq i64 %86, 1 + br i1 %87, label %then0__4, label %else__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__37) + br label %continue__4 + +else__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__37) + br label %continue__4 + +continue__4: ; preds = %else__4, %then0__4 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %89 = bitcast i8* %88 to %Qubit** + %qb__38 = load %Qubit*, %Qubit** %89 + %90 = icmp eq i64 %86, 1 + br i1 %90, label %then0__5, label %else__5 + +then0__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__38) + br label %continue__5 + +else__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__38) + br label %continue__5 + +continue__5: ; preds = %else__5, %then0__5 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %92 = bitcast i8* %91 to %Qubit** + %qb__39 = load %Qubit*, %Qubit** %92 + %93 = icmp eq i64 %86, 1 + br i1 %93, label %then0__6, label %else__6 + +then0__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__39) + br label %continue__6 + +else__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__39) + br label %continue__6 + +continue__6: ; preds = %else__6, %then0__6 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %95 = bitcast i8* %94 to %Qubit** + %qb__40 = load %Qubit*, %Qubit** %95 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__40) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %97 = bitcast i8* %96 to %Qubit** + %qb__41 = load %Qubit*, %Qubit** %97 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__41) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %99 = bitcast i8* %98 to %Qubit** + %qb__42 = load %Qubit*, %Qubit** %99 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__42) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %100 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %101 = bitcast i8* %100 to %Qubit** + %qb__43 = load %Qubit*, %Qubit** %101 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__43) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %103 = bitcast i8* %102 to %Qubit** + %qb__44 = load %Qubit*, %Qubit** %103 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__44) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %105 = bitcast i8* %104 to %Qubit** + %qb__46 = load %Qubit*, %Qubit** %105 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__46) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +define void 
@Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + ret void +} + +declare void @__quantum__qis__inject_barrier(i64, i64) + +define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(i1 %compare) { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, 
%Qubit* %qb) + %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10 + store %Qubit* %8, %Qubit** %3 + store %Qubit* %11, %Qubit** %5 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %13 = bitcast i8* %12 to i2* + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %15 = bitcast i8* %14 to i2* + %16 = load i2, i2* @PauliY + %17 = load i2, i2* @PauliX + store i2 %16, i2* %13 + store i2 %17, i2* %15 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %18 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %18, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %31, %exiting__1 ] + %19 = icmp sle i64 %i, 1 + br i1 %19, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %21 = bitcast i8* %20 to i2* + %22 = load i2, i2* %21 + %23 = load i2, i2* @PauliY + %24 = icmp eq i2 %22, %23 + %25 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %26 = bitcast i8* %25 to i2* + %27 = load i2, i2* %26 + %28 = load i2, i2* @PauliI + %29 = icmp eq i2 %27, %28 + %30 = or i1 %24, %29 + br i1 %30, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %31 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %32 = load i1, i1* %haveY + br i1 %32, label %then0__2, label %test1__1 + +then0__2: ; preds = %exit__1 + %33 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + store %Result* %33, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 -1) + br label %continue__2 + +test1__1: ; preds = %exit__1 + br i1 false, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %34 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 1) + %35 = load %Result*, %Result** %res + store %Result* %34, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %35, i64 -1) + br label %continue__2 + +test2__1: ; preds = %test1__1 + br i1 false, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %37 = bitcast i8* %36 to i2* + %38 = load i2, i2* %37 + %39 = load i2, i2* @PauliX + %40 = icmp eq i2 %38, %39 + br i1 %40, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%qs12, i64 0) + %42 = bitcast i8* %41 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %42 + %43 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__2) + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) + %44 = load %Result*, %Result** %res + store %Result* %43, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) + br label %continue__3 + +else__1: ; preds = %then2__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %46 = bitcast i8* %45 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %46 + %47 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__3) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 1) + %48 = load %Result*, %Result** %res + store %Result* %47, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %48, i64 -1) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label %continue__2 + +test3__1: ; preds = %test2__1 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %50 = bitcast i8* %49 to i2* + %51 = load i2, i2* %50 + %52 = load i2, i2* @PauliX + %53 = icmp eq i2 %51, %52 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = load i2, i2* %55 + %57 = load i2, i2* @PauliX + %58 = icmp eq i2 %56, %57 + %59 = and i1 %53, %58 + br i1 %59, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %60 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %60, i64 1) + %61 = load %Result*, %Result** %res + store %Result* %60, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %61, i64 -1) + br label %continue__2 + +test4__1: ; preds = %test3__1 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %63 = bitcast i8* %62 to i2* + %64 = load i2, i2* %63 + %65 = load i2, i2* @PauliX + %66 = icmp eq i2 %64, %65 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %68 = bitcast i8* %67 to i2* + %69 = load i2, i2* %68 + %70 = load i2, i2* @PauliZ + %71 = icmp eq i2 %69, %70 + %72 = and i1 %66, %71 + br i1 %72, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %73 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 1) + %74 = load %Result*, %Result** %res + store %Result* %73, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + br label %continue__2 + +test5__1: ; preds = %test4__1 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = load i2, i2* %76 + %78 = load i2, i2* @PauliZ + %79 = icmp eq i2 %77, %78 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %81 = bitcast i8* %80 to i2* + %82 = load i2, i2* %81 + %83 = load i2, i2* @PauliX + %84 = icmp eq i2 %82, %83 + %85 = and i1 %79, %84 + br i1 %85, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %86 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 1) + %87 = load %Result*, %Result** %res + store %Result* %86, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %87, i64 -1) + br label %continue__2 + +test6__1: ; preds = %test5__1 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = load i2, i2* %89 + %91 = load i2, i2* @PauliZ + %92 = icmp eq i2 %90, %91 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %94 = bitcast i8* %93 to i2* + %95 = load i2, i2* %94 + %96 = load i2, i2* @PauliZ + %97 = icmp eq i2 %95, %96 + %98 = and i1 %92, %97 + br i1 %98, label %then6__1, label %continue__2 + +then6__1: ; preds = %test6__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %99 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 1) + %100 = load %Result*, %Result** %res + store %Result* %99, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %100, i64 -1) + br label %continue__2 + +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %r12 = load %Result*, %Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) + br i1 %compare, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__2 + %101 = load %Result*, %Result** @ResultZero + %102 = call i1 @__quantum__rt__result_equal(%Result* %r0, %Result* %101) + br i1 %102, label %then0__5, label %continue__5 + +then0__5: ; preds = %then0__4 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %104 = bitcast i8* %103 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %104 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__4) + br label %continue__5 + +continue__5: ; preds = %then0__5, %then0__4 + br label %continue__4 + +continue__4: ; preds = %continue__5, %continue__2 + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) + ret void +} + +declare %Result* @__quantum__qis__single_qubit_measure(i64, i64, %Qubit*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) + +declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load 
%Qubit*, %Qubit** %2 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5 + %ctls__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %ctls, %Array* %3) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls__1) + %7 = icmp eq i64 %6, 1 + br i1 %7, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__1, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__1, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { 
%Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5 + store %Qubit* %target, %Qubit** %6 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) + ret void +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) + +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +entry: + %0 = call %Result* 
@__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %0, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %3 = icmp sle i64 %i, %2 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %5 = bitcast i8* %4 to i2* + %6 = load i2, i2* %5 + %7 = load i2, i2* @PauliY + %8 = icmp eq i2 %6, %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %10 = bitcast i8* %9 to i2* + %11 = load i2, i2* %10 + %12 = load i2, i2* @PauliI + %13 = icmp eq i2 %11, %12 + %14 = or i1 %8, %13 + br i1 %14, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %15 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %16 = load i1, i1* %haveY br i1 %16, label %then0__2, label %test1__1 then0__2: ; preds = %exit__1 @@ -246,7 +919,9 @@ test3__1: ; preds = %test2__1 br i1 %45, label %then3__1, label %test4__1 then3__1: ; preds = %test3__1 - %46 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) 
+ %46 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 1) %47 = load %Result*, %Result** %res store %Result* %46, %Result** %res @@ -269,7 +944,9 @@ test4__1: ; preds = %test3__1 br i1 %58, label %then4__1, label %test5__1 then4__1: ; preds = %test4__1 - %59 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %59 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 1) %60 = load %Result*, %Result** %res store %Result* %59, %Result** %res @@ -292,7 +969,9 @@ test5__1: ; preds = %test4__1 br i1 %71, label %then5__1, label %test6__1 then5__1: ; preds = %test5__1 - %72 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %72 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 1) %73 = load %Result*, %Result** %res store %Result* %72, %Result** %res @@ -300,84 +979,36 @@ then5__1: ; preds = %test5__1 call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) br label %continue__2 -test6__1: ; preds = %test5__1 - %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %75 = bitcast i8* %74 to i2* - %76 = load i2, i2* %75 - %77 = load i2, i2* @PauliZ - %78 = icmp eq i2 %76, %77 - %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %80 = bitcast i8* %79 to 
i2* - %81 = load i2, i2* %80 - %82 = load i2, i2* @PauliZ - %83 = icmp eq i2 %81, %82 - %84 = and i1 %78, %83 - br i1 %84, label %then6__1, label %continue__2 - -then6__1: ; preds = %test6__1 - %85 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qubits) - call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 1) - %86 = load %Result*, %Result** %res - store %Result* %85, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) - br label %continue__2 - -continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 - %87 = load %Result*, %Result** %res - call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %87 -} - -declare void @__quantum__rt__result_update_reference_count(%Result*, i64) - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) - -define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { -entry: - %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) - ret %Result* %0 -} - -define %Result* @Microsoft__Quantum__Intrinsic__Mxx__body(%Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 -} - -define %Result* @Microsoft__Quantum__Intrinsic__Mxz__body(%Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) - call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 -} - -define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { -entry: - %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 -} +test6__1: ; preds = %test5__1 + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %75 = bitcast i8* %74 to i2* + %76 = load i2, i2* %75 + %77 = load i2, i2* @PauliZ + %78 = icmp eq i2 %76, %77 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %80 = bitcast i8* %79 to i2* + %81 = load i2, i2* %80 + %82 = load i2, i2* @PauliZ + %83 = icmp eq i2 %81, %82 + %84 = and i1 %78, %83 + br i1 %84, label %then6__1, label %continue__2 -define %Result* @Microsoft__Quantum__Intrinsic__Mzx__body(%Array* %qubits) { -entry: +then6__1: ; preds = %test6__1 call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + %85 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 -} + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 1) + %86 = load %Result*, %Result** %res + store %Result* %85, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + br label %continue__2 -define %Result* @Microsoft__Quantum__Intrinsic__Mzz__body(%Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %87 = load %Result*, %Result** %res + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 + ret %Result* %87 } define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { @@ -395,9 +1026,9 @@ entry: define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) @@ -407,9 +1038,9 @@ entry: define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) @@ -431,9 +1062,9 @@ entry: define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, 
i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) @@ -443,9 +1074,9 @@ entry: define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) @@ -467,9 +1098,9 @@ entry: define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void 
@__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) @@ -479,9 +1110,9 @@ entry: define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) @@ -520,62 +1151,6 @@ entry: ret void } -define void @Microsoft__Quantum__Intrinsic__Sx__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Sx__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Sx__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Sx__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - 
-define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { entry: call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) @@ -598,865 +1173,357 @@ entry: ret void } -define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tx__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tx__adj(%Qubit* %qb) { -entry: - call void 
@__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tx__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - ret void -} - -define void 
@Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, 
%Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Testing__Tracer__Fixup__body(%Array* %qs) { +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qs) - %1 = sub i64 %0, 1 - br label %header__1 - -header__1: ; preds = 
%exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] - %2 = icmp sle i64 %i, %1 - br i1 %2, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %i) - %4 = bitcast i8* %3 to %Qubit** - %qb = load %Qubit*, %Qubit** %4 call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - br label %exiting__1 - -exiting__1: ; preds = %body__1 - %5 = add i64 %i, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) ret void } -define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qb = load %Qubit*, %Qubit** %1 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %qb__1 = load %Qubit*, %Qubit** %3 - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %5 = bitcast i8* %4 to %Qubit** - %qb__2 = load %Qubit*, %Qubit** %5 - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %qb__3 = load %Qubit*, %Qubit** %7 - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9 - %11 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %15 = bitcast i8* %14 to %Qubit** - %qb__4 = load %Qubit*, %Qubit** %15 - call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) - %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %17 = bitcast i8* %16 to %Qubit** - %qb__5 = load %Qubit*, %Qubit** %17 - call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) - %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %19 = bitcast i8* %18 to %Qubit** - %qb__6 = load %Qubit*, %Qubit** %19 - call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %21 = bitcast i8* %20 to %Qubit** - %qb__7 = load %Qubit*, %Qubit** %21 - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %23 = bitcast i8* %22 to %Qubit** - %qb__9 = load %Qubit*, %Qubit** %23 - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) - call void @__quantum__qis__inject_barrier(i64 42, i64 1) - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %25 = bitcast i8* %24 to %Qubit** - %qb__11 = load %Qubit*, %Qubit** %25 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %27 = bitcast i8* %26 to %Qubit** - %qb__12 = load %Qubit*, %Qubit** %27 - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__12) - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %29 = bitcast i8* %28 to %Qubit** - %qb__13 = 
load %Qubit*, %Qubit** %29 - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__13) - %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %31 = bitcast i8* %30 to %Qubit** - %qb__14 = load %Qubit*, %Qubit** %31 - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__14) - %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %33 = bitcast i8* %32 to %Qubit** - %34 = load %Qubit*, %Qubit** %33 - %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %36 = bitcast i8* %35 to %Qubit** - %37 = load %Qubit*, %Qubit** %36 - call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %39 = bitcast i8* %38 to %Qubit** - %qb__15 = load %Qubit*, %Qubit** %39 - call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__15) - %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %41 = bitcast i8* %40 to %Qubit** - %qb__16 = load %Qubit*, %Qubit** %41 - call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__16) - %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %43 = bitcast i8* %42 to %Qubit** - %qb__17 = load %Qubit*, %Qubit** %43 - call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb__17) - %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %45 = bitcast i8* %44 to %Qubit** - %qb__18 = load %Qubit*, %Qubit** %45 - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__18) - %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %47 = bitcast i8* %46 to %Qubit** - %qb__20 = load %Qubit*, %Qubit** %47 - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__20) - %c = call %Qubit* @__quantum__rt__qubit_allocate() - %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %48 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %49 = bitcast i8* %48 to %Qubit** - store %Qubit* %c, %Qubit** %49 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %51 = bitcast i8* %50 to %Qubit** - %qb__22 = load %Qubit*, %Qubit** %51 - br i1 true, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb__22) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__22) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) - %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) - %53 = bitcast i8* %52 to %Qubit** - store %Qubit* %c, %Qubit** %53 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) - %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %55 = bitcast i8* %54 to %Qubit** - %qb__23 = load %Qubit*, %Qubit** %55 - br i1 true, label %then0__2, label %else__2 - -then0__2: ; preds = %continue__1 - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__23) - br label %continue__2 - -else__2: ; preds = %continue__1 - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__23) - br label %continue__2 - -continue__2: ; preds = %else__2, %then0__2 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) - %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) 
- %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) - %57 = bitcast i8* %56 to %Qubit** - store %Qubit* %c, %Qubit** %57 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %59 = bitcast i8* %58 to %Qubit** - %qb__24 = load %Qubit*, %Qubit** %59 - br i1 true, label %then0__3, label %else__3 - -then0__3: ; preds = %continue__2 - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__24) - br label %continue__3 - -else__3: ; preds = %continue__2 - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__24) - br label %continue__3 - -continue__3: ; preds = %else__3, %then0__3 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) - %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) - %61 = bitcast i8* %60 to %Qubit** - store %Qubit* %c, %Qubit** %61 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) - %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %63 = bitcast i8* %62 to %Qubit** - %qb__25 = load %Qubit*, %Qubit** %63 - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__25) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) - %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) - %65 = bitcast i8* %64 to %Qubit** - store %Qubit* %c, %Qubit** %65 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) - %66 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %67 = bitcast i8* %66 to %Qubit** - %qb__26 = load %Qubit*, %Qubit** %67 - call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__26) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) - %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) - %69 = bitcast i8* %68 to %Qubit** - store %Qubit* %c, %Qubit** %69 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %71 = bitcast i8* %70 to %Qubit** - %qb__27 = load %Qubit*, %Qubit** %71 - call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__27) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) - %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) - %73 = bitcast i8* %72 to %Qubit** - store %Qubit* %c, %Qubit** %73 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) - %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %75 = bitcast i8* %74 to %Qubit** - %qb__28 = load %Qubit*, %Qubit** %75 - call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__6, %Qubit* %qb__28) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) - %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) - %77 = bitcast 
i8* %76 to %Qubit** - store %Qubit* %c, %Qubit** %77 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) - %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %79 = bitcast i8* %78 to %Qubit** - %qb__29 = load %Qubit*, %Qubit** %79 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__29) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) - %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) - %81 = bitcast i8* %80 to %Qubit** - store %Qubit* %c, %Qubit** %81 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) - %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %83 = bitcast i8* %82 to %Qubit** - %qb__31 = load %Qubit*, %Qubit** %83 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__31) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) - call void @__quantum__rt__qubit_release(%Qubit* %c) - %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %85 = bitcast i8* %84 to %Qubit** - %qb__33 = load %Qubit*, %Qubit** %85 - %86 
= call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) - %87 = icmp eq i64 %86, 1 - br i1 %87, label %then0__4, label %else__4 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -then0__4: ; preds = %continue__3 - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__33) - br label %continue__4 +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -else__4: ; preds = %continue__3 - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__33) - br label %continue__4 +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -continue__4: ; preds = %else__4, %then0__4 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %89 = bitcast i8* %88 to %Qubit** - %qb__34 = load %Qubit*, %Qubit** %89 - %90 = icmp eq i64 %86, 1 - br i1 %90, label %then0__5, label %else__5 +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then0__5: ; preds = %continue__4 - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__34) - br label %continue__5 +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -else__5: ; preds = 
%continue__4 - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__34) - br label %continue__5 +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 -continue__5: ; preds = %else__5, %then0__5 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %92 = bitcast i8* %91 to %Qubit** - %qb__35 = load %Qubit*, %Qubit** %92 - %93 = icmp eq i64 %86, 1 - br i1 %93, label %then0__6, label %else__6 +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 -then0__6: ; preds = %continue__5 - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__35) - br label %continue__6 +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} -else__6: ; preds = %continue__5 - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__35) - br label %continue__6 +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} -continue__6: ; preds = %else__6, %then0__6 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %95 = bitcast i8* %94 to %Qubit** - %qb__36 = load %Qubit*, %Qubit** %95 - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__36) - 
call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %97 = bitcast i8* %96 to %Qubit** - %qb__37 = load %Qubit*, %Qubit** %97 - call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__37) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %99 = bitcast i8* %98 to %Qubit** - %qb__38 = load %Qubit*, %Qubit** %99 - call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__38) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %101 = bitcast i8* %100 to %Qubit** - %qb__39 = load %Qubit*, %Qubit** %101 - call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__39) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %103 = bitcast i8* %102 to %Qubit** - %qb__40 = load %Qubit*, %Qubit** %103 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__40) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %105 = bitcast i8* %104 to %Qubit** - 
%qb__42 = load %Qubit*, %Qubit** %105 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__42) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__qubit_release_array(%Array* %cc) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) ret void } -declare %Qubit* @__quantum__rt__qubit_allocate() +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -declare %Array* @__quantum__rt__qubit_allocate_array(i64) +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -declare void @__quantum__qis__inject_barrier(i64, i64) +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -declare void @__quantum__rt__qubit_release(%Qubit*) +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -declare void @__quantum__rt__qubit_release_array(%Array*) +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* 
%__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(i1 %compare) { +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qb = load %Qubit*, %Qubit** %1 - %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) - %5 = bitcast i8* %4 to %Qubit** - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %10 = bitcast i8* %9 to 
%Qubit** - %11 = load %Qubit*, %Qubit** %10 - store %Qubit* %8, %Qubit** %3 - store %Qubit* %11, %Qubit** %5 - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %13 = bitcast i8* %12 to i2* - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %15 = bitcast i8* %14 to i2* - %16 = load i2, i2* @PauliY - %17 = load i2, i2* @PauliX - store i2 %16, i2* %13 - store i2 %17, i2* %15 - call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %18 = load %Result*, %Result** @ResultOne - %res = alloca %Result* - store %Result* %18, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 1) - %haveY = alloca i1 - store i1 false, i1* %haveY - br label %header__1 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} -header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %31, %exiting__1 ] - %19 = icmp sle i64 %i, 1 - br i1 %19, label %body__1, label %exit__1 +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} -body__1: ; preds = %header__1 - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %21 = bitcast i8* %20 to i2* - %22 = load i2, i2* %21 - %23 = load i2, i2* @PauliY - %24 = icmp eq i2 %22, %23 - %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %26 = bitcast i8* %25 to i2* - %27 = load i2, i2* %26 - %28 = load i2, i2* @PauliI - %29 = icmp eq i2 %27, %28 - %30 = or i1 %24, %29 - br i1 %30, label %then0__1, label %continue__1 +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call 
void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -then0__1: ; preds = %body__1 - store i1 true, i1* %haveY +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 -continue__1: ; preds = %then0__1, %body__1 - br label %exiting__1 +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -exiting__1: ; preds = %continue__1 - %31 = add i64 %i, 1 - br label %header__1 +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -exit__1: ; preds = %header__1 - %32 = load i1, i1* %haveY - br i1 %32, label %then0__2, label %test1__1 +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -then0__2: ; preds = %exit__1 - %33 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) - store %Result* %33, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 -1) - br label %continue__2 +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 -test1__1: ; preds = %exit__1 - br i1 false, label %then1__1, label 
%test2__1 +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define %Tuple* @Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define %Result* @Microsoft__Quantum__Instructions__Mx__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Instructions__Mxx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} -then1__1: ; preds = %test1__1 - %34 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 1) - %35 = load %Result*, %Result** %res - store %Result* %34, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %35, i64 -1) - br label %continue__2 +define %Result* @Microsoft__Quantum__Instructions__Mxz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* 
%qubits, i64 -1) + ret %Result* %0 +} -test2__1: ; preds = %test1__1 - br i1 false, label %then2__1, label %test3__1 +define %Result* @Microsoft__Quantum__Instructions__Mz__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} -then2__1: ; preds = %test2__1 - %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %37 = bitcast i8* %36 to i2* - %38 = load i2, i2* %37 - %39 = load i2, i2* @PauliX - %40 = icmp eq i2 %38, %39 - br i1 %40, label %then0__3, label %else__1 +define %Result* @Microsoft__Quantum__Instructions__Mzx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} -then0__3: ; preds = %then2__1 - %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %42 = bitcast i8* %41 to %Qubit** - %qb__2 = load %Qubit*, %Qubit** %42 - %43 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__2) - call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) - %44 = load %Result*, %Result** %res - store %Result* %43, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) - br label %continue__3 +define %Result* @Microsoft__Quantum__Instructions__Mzz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} -else__1: ; preds = %then2__1 - %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, 
i64 0) - %46 = bitcast i8* %45 to %Qubit** - %qb__3 = load %Qubit*, %Qubit** %46 - %47 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__3) - call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 1) - %48 = load %Result*, %Result** %res - store %Result* %47, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %48, i64 -1) - br label %continue__3 +define void @Microsoft__Quantum__Instructions__Sx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} -continue__3: ; preds = %else__1, %then0__3 - br label %continue__2 +define void @Microsoft__Quantum__Instructions__Sx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} -test3__1: ; preds = %test2__1 - %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %50 = bitcast i8* %49 to i2* - %51 = load i2, i2* %50 - %52 = load i2, i2* @PauliX - %53 = icmp eq i2 %51, %52 - %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %55 = bitcast i8* %54 to i2* - %56 = load i2, i2* %55 - %57 = load i2, i2* @PauliX - %58 = icmp eq i2 %56, %57 - %59 = and i1 %53, %58 - br i1 %59, label %then3__1, label %test4__1 +define void @Microsoft__Quantum__Instructions__Sx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then3__1: ; preds = %test3__1 - %60 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 1) - %61 = load %Result*, %Result** %res - store 
%Result* %60, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %61, i64 -1) - br label %continue__2 +define void @Microsoft__Quantum__Instructions__Sx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -test4__1: ; preds = %test3__1 - %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %63 = bitcast i8* %62 to i2* - %64 = load i2, i2* %63 - %65 = load i2, i2* @PauliX - %66 = icmp eq i2 %64, %65 - %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %68 = bitcast i8* %67 to i2* - %69 = load i2, i2* %68 - %70 = load i2, i2* @PauliZ - %71 = icmp eq i2 %69, %70 - %72 = and i1 %66, %71 - br i1 %72, label %then4__1, label %test5__1 +define void @Microsoft__Quantum__Instructions__Sz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} -then4__1: ; preds = %test4__1 - %73 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 1) - %74 = load %Result*, %Result** %res - store %Result* %73, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - br label %continue__2 +define void @Microsoft__Quantum__Instructions__Sz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} -test5__1: ; preds = %test4__1 - %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %76 = bitcast i8* %75 to i2* - %77 = 
load i2, i2* %76 - %78 = load i2, i2* @PauliZ - %79 = icmp eq i2 %77, %78 - %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %81 = bitcast i8* %80 to i2* - %82 = load i2, i2* %81 - %83 = load i2, i2* @PauliX - %84 = icmp eq i2 %82, %83 - %85 = and i1 %79, %84 - br i1 %85, label %then5__1, label %test6__1 +define void @Microsoft__Quantum__Instructions__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then5__1: ; preds = %test5__1 - %86 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 1) - %87 = load %Result*, %Result** %res - store %Result* %86, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %87, i64 -1) - br label %continue__2 +define void @Microsoft__Quantum__Instructions__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -test6__1: ; preds = %test5__1 - %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %89 = bitcast i8* %88 to i2* - %90 = load i2, i2* %89 - %91 = load i2, i2* @PauliZ - %92 = icmp eq i2 %90, %91 - %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %94 = bitcast i8* %93 to i2* - %95 = load i2, i2* %94 - %96 = load i2, i2* @PauliZ - %97 = icmp eq i2 %95, %96 - %98 = and i1 %92, %97 - br i1 %98, label %then6__1, label %continue__2 +define 
void @Microsoft__Quantum__Instructions__Tx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} -then6__1: ; preds = %test6__1 - %99 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 1) - %100 = load %Result*, %Result** %res - store %Result* %99, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %100, i64 -1) - br label %continue__2 +define void @Microsoft__Quantum__Instructions__Tx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} -continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 - %r12 = load %Result*, %Result** %res - call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) - br i1 %compare, label %then0__4, label %continue__4 +define void @Microsoft__Quantum__Instructions__Tx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then0__4: ; preds = %continue__2 - %101 = load %Result*, %Result** @ResultZero - %102 = call i1 @__quantum__rt__result_equal(%Result* %r0, %Result* %101) - br i1 %102, label %then0__5, label %continue__5 +define void @Microsoft__Quantum__Instructions__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void 
@__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then0__5: ; preds = %then0__4 - %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %104 = bitcast i8* %103 to %Qubit** - %qb__4 = load %Qubit*, %Qubit** %104 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__4) - br label %continue__5 +define void @Microsoft__Quantum__Instructions__Tz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} -continue__5: ; preds = %then0__5, %then0__4 - br label %continue__4 +define void @Microsoft__Quantum__Instructions__Tz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} -continue__4: ; preds = %continue__5, %continue__2 - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) +define void @Microsoft__Quantum__Instructions__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) +define void @Microsoft__Quantum__Instructions__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} define { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { entry: %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr { %String* }, { %String* }* %1, i64 0, i32 0 + %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 store %String* %__Item1__, %String** %2 call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i64 1) ret { %String* }* %1 } -declare %Tuple* @__quantum__rt__tuple_create(i64) - declare void @__quantum__rt__string_update_reference_count(%String*, i64) diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index 19f7d51abde..f40b8f9aa4d 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -3,26 +3,35 @@ namespace Microsoft.Quantum.Instructions { + // We'll use TargetInstruction attribute to suppress Q#'s compiler decoration of names for the generated callbacks. 
+ open Microsoft.Quantum.Targeting; + + @TargetInstruction("single_qubit_op") operation single_qubit_op(op_id: Int, duration: Int, qb : Qubit) : Unit { body intrinsic; } + @TargetInstruction("multi_qubit_op") operation multi_qubit_op(op_id: Int, duration: Int, qbs : Qubit[]) : Unit { body intrinsic; } + @TargetInstruction("single_qubit_op_ctl") operation single_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit { body intrinsic; } + @TargetInstruction("multi_qubit_op_ctl") operation multi_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { body intrinsic; } + @TargetInstruction("single_qubit_measure") operation single_qubit_measure(op_id: Int, duration: Int, qb : Qubit) : Result { body intrinsic; } + @TargetInstruction("joint_measure") operation joint_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { body intrinsic; } @@ -98,7 +107,9 @@ namespace Microsoft.Quantum.Instructions { namespace Microsoft.Quantum.Tracer { - @TargetInstruction("inject_global_barrier") + open Microsoft.Quantum.Targeting; + + @TargetInstruction("inject_barrier") operation Barrier(id : Int, duration : Int) : Unit { body intrinsic; } @@ -125,7 +136,7 @@ namespace Microsoft.Quantum.Intrinsic { is Adj + Ctl { body (...) { Controlled X([control], target); } adjoint self; - controlled (ctls, ...) { Controlled X(ctls + control, target); } + controlled (ctls, ...) { Controlled X(ctls + [control], target); } } @Inline() @@ -205,7 +216,7 @@ namespace Microsoft.Quantum.Intrinsic { @Inline() operation M(qb : Qubit) : Result { - body (...) { return Phyz.Mz(qb); } + body (...) 
{ return Phys.Mz(qb); } } @Inline() @@ -226,8 +237,8 @@ namespace Microsoft.Quantum.Intrinsic { // Single qubit measurement -- differentiate between Mx and Mz elif Length(paulis) == 1 { - if (paulis[0] == PauliX) { set res = Mx(qubits[0]); } - else { set res = Mz(qubits[0]); } + if (paulis[0] == PauliX) { set res = Phys.Mx(qubits[0]); } + else { set res = Phys.Mz(qubits[0]); } } // Specialize for two-qubit measurements: Mxx, Mxz, Mzx, Mzz From 80d7a507b7aaec9154dfbac2964996dba671f480 Mon Sep 17 00:00:00 2001 From: Robin Kuzmin Date: Thu, 25 Feb 2021 18:29:07 -0800 Subject: [PATCH 15/30] Added Parity() to the Q# library, and the tests. (#531) * Cleanup, adding Parity(). * Added Parity(). * CR changes. * CR changes. --- AdvantageBenchmark/privateBuild/host.csproj | 2 +- .../releasedBuild/quantum/quantum.csproj | 2 +- src/QirRuntime/test/QIR-static/CMakeLists.txt | 1 + .../test/QIR-static/qir-test-other.cpp | 11 +++++ .../test/QIR-static/qsharp/qir-test-arrays.qs | 2 + .../test/QIR-static/qsharp/qir-test-math.qs | 44 ------------------- .../test/QIR-static/qsharp/qir-test-other.qs | 27 ++++++++++++ .../QSharpFoundation/Bitwise/Bitwise.qs | 32 +++++++++++++- 8 files changed, 73 insertions(+), 48 deletions(-) create mode 100644 src/QirRuntime/test/QIR-static/qir-test-other.cpp create mode 100644 src/QirRuntime/test/QIR-static/qsharp/qir-test-other.qs diff --git a/AdvantageBenchmark/privateBuild/host.csproj b/AdvantageBenchmark/privateBuild/host.csproj index 0af33ce55d9..c38a2e4750d 100644 --- a/AdvantageBenchmark/privateBuild/host.csproj +++ b/AdvantageBenchmark/privateBuild/host.csproj @@ -1,4 +1,4 @@ - + diff --git a/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj b/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj index e89bfdce2bb..d92408e6a0e 100644 --- a/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj +++ b/AdvantageBenchmark/releasedBuild/quantum/quantum.csproj @@ -1,4 +1,4 @@ - + diff --git a/src/QirRuntime/test/QIR-static/CMakeLists.txt 
b/src/QirRuntime/test/QIR-static/CMakeLists.txt index 9fc202a931a..f82e0b7a928 100644 --- a/src/QirRuntime/test/QIR-static/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-static/CMakeLists.txt @@ -21,6 +21,7 @@ add_executable(qir-static-tests qir-test-math.cpp qir-test-strings.cpp qir-test-ouput.cpp + qir-test-other.cpp ) target_link_libraries(qir-static-tests PUBLIC diff --git a/src/QirRuntime/test/QIR-static/qir-test-other.cpp b/src/QirRuntime/test/QIR-static/qir-test-other.cpp new file mode 100644 index 00000000000..41bd6b4b530 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qir-test-other.cpp @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include "catch.hpp" + +extern "C" uint64_t Microsoft__Quantum__Testing__QIR__Other__ParityTest__body(); // NOLINT + +TEST_CASE("QIR: Other.Parity", "[qir.Other][qir.Other.Parity]") +{ + REQUIRE(0 == Microsoft__Quantum__Testing__QIR__Other__ParityTest__body()); +} diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs index 58ced577cd5..195e9905ec1 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-arrays.qs @@ -2,6 +2,7 @@ // Licensed under the MIT License. 
namespace Microsoft.Quantum.Testing.QIR { + open Microsoft.Quantum.Testing.QIR.Other; open Microsoft.Quantum.Testing.QIR.Math; open Microsoft.Quantum.Testing.QIR.Str; open Microsoft.Quantum.Testing.QIR.Out; @@ -53,6 +54,7 @@ namespace Microsoft.Quantum.Testing.QIR { let res16 = ArcSinTest(); let res17 = ArcCosTest(); let res18 = ArcTanTest(); + let res19 = ParityTest(); MessageTest("Test"); // Conditionals: diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs index b89d8777f8a..137718d76cd 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-math.qs @@ -273,49 +273,6 @@ namespace Microsoft.Quantum.Testing.QIR.Math { return 0; } - // Remove when the Q# compiler bug https://github.com/microsoft/qsharp-compiler/issues/877 is resolved: - //function MyRound(value : Double) : Int { // 4.x 4.5 5.x 5.5 -4.x -4.5 -5.x -5.5 - // - // // Temporary piece of code to test a Q# compiler bug: - // - // let truncated = Truncate(value); // 4 4 5 5 -4 -4 -5 -5 - // if truncated >= 0 { - // let diff = value - IntAsDouble(truncated); // 0.x 0.5 0.x 0.5 diff - // if diff < 0.5 { return truncated; } // 4 5 return - // if diff > 0.5 { return (truncated + 1); } // 5 6 return - // if truncated % 2 == 0 { return truncated; } // 4 return - // else { return truncated + 1; } // 6 return - // } - // else { - // let diff = IntAsDouble(truncated) - value; // 0.x 0.5 0.x 0.5 diff - // if diff < 0.5 { return truncated; } // -4 -5 - // if diff > 0.5 { return (truncated - 1); } // -5 -6 - // if truncated % 2 == 0 { return truncated; } // -4 - // else { return truncated - 1; } // -6 - // } - // - // // End of temporary piece of code. 
- // - // - // // Temporary piece of code to work around the clang++ crash upon `Round()` (resolved in `0.15.2102129370-alpha`): - // - // //let truncated = Truncate(value); // 4 4 5 5 -4 -4 -5 -5 - // //if truncated >= 0 { - // // let diff = value - IntAsDouble(truncated); // 0.x 0.5 0.x 0.5 diff - // // if diff < 0.5 { return truncated; } // 4 5 return - // // if diff > 0.5 { return (truncated + 1); } // 5 6 return - // // if truncated % 2 == 0 { return truncated; } // 4 return - // // return truncated + 1; // 6 return - // //} - // //let diff2 = IntAsDouble(truncated) - value; // 0.x 0.5 0.x 0.5 diff - // //if diff2 < 0.5 { return truncated; } // -4 -5 - // //if diff2 > 0.5 { return (truncated - 1); } // -5 -6 - // //if truncated % 2 == 0 { return truncated; } // -4 - // //return truncated - 1; // -6 - // - // // End of temporary piece of code to work around the clang++ crash. - //} - function IeeeRemainderTest() : Int { // function IeeeRemainder(x : Double, y : Double) : Double @@ -332,7 +289,6 @@ namespace Microsoft.Quantum.Testing.QIR.Math { // the `remainder` and `IEEERemainder()` calculated below. // That is normal but we avoid that. let remainder = dividend - (divisor * IntAsDouble(Round(dividend / divisor))); - //MyRound(dividend / divisor))); // Remove when the https://github.com/microsoft/qsharp-compiler/issues/877 is resolved. if not Close(remainder, IEEERemainder(dividend, divisor)) { Message(DoubleAsString(remainder)); // The output for the test faiulure analysis, Message(DoubleAsString(dividend)); // if the failure happens. diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-test-other.qs b/src/QirRuntime/test/QIR-static/qsharp/qir-test-other.qs new file mode 100644 index 00000000000..efb30081098 --- /dev/null +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-test-other.qs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +namespace Microsoft.Quantum.Testing.QIR.Other { + + open Microsoft.Quantum.Bitwise; + + function ParityTest() : Int { + //function Parity (a : Int) : Int + if 0 != Parity(0) { return 1; } + if 1 != Parity(1) { return 2; } + if 1 != Parity(2) { return 3; } + if 0 != Parity(3) { return 4; } + if 0 != Parity(0xFF) { return 5; } + if 1 != Parity(0x100) { return 6; } + if 0 != Parity(0xFFFF) { return 7; } + if 1 != Parity(0x10000) { return 8; } + if 1 != Parity(0x7F00000000000000) { return 9; } + if 0 != Parity(0x0F00000000000000) { return 10; } + + if 0 != Parity(-1) { return 11; } // 0xFFFFFFFFFFFFFFFF + if 1 != Parity(-2) { return 12; } // 0xFFFFFFFFFFFFFFFE + + return 0; + } + +} diff --git a/src/Simulation/QSharpFoundation/Bitwise/Bitwise.qs b/src/Simulation/QSharpFoundation/Bitwise/Bitwise.qs index 95b98b4e2d5..8e2b5a40215 100644 --- a/src/Simulation/QSharpFoundation/Bitwise/Bitwise.qs +++ b/src/Simulation/QSharpFoundation/Bitwise/Bitwise.qs @@ -75,7 +75,13 @@ namespace Microsoft.Quantum.Bitwise { /// # Summary - /// Returns the bitwise PARITY of an integer (1 if its binary representation contains odd number of ones and 0 otherwise). + /// Returns the bitwise PARITY of an integer. + /// + /// # Description + /// This function returns the bitwise parity of the + /// [two's complement](https://en.wikipedia.org/wiki/Signed_number_representations#Two's_complement) + /// representation of its input, returning `1` if that representation + /// contains an odd number of ones, and returning `0` otherwise. /// /// # Example /// ```qsharp @@ -83,7 +89,29 @@ namespace Microsoft.Quantum.Bitwise { /// let x = Parity(a); // x : Int = 1. /// ``` function Parity (a : Int) : Int { - body intrinsic; + mutable v = a; + // http://graphics.stanford.edu/~seander/bithacks.html#ParityMultiply + // XOR the bits in every 2-bit pair, save the result in the least significant bit (LSB) of the pair: + set v ^^^= (v >>> 1); // bit[0] = bit[0] ^ bit[1]; bit[2] = bit[2] ^ bit[3]; .. 
+ // Now only the even bits contain the information. + // XOR the even bits in every 4-bit nibble, save the result in the LSB of the nibble: + set v ^^^= (v >>> 2); // bit[0] = bit[0] ^ bit[2]; bit[4] = bit[4] ^ bit[6]; .. + // Now only the LSB of each nibble contains the information. + set v = + (v &&& 0x1111111111111111) // In every 4-bit nibble clear (to '0') all the bits except the LSB. + * 0x1111111111111111; // Explanation with a 32-bit example: + // V (Down arrow) We are interested in the LSB of the most significant 4-bit nibble. + // 0x11111111 The multiplier `* 0x1111111111111111UL` above. + // * 0x10010011 The result of `(v & 0x1111111111111111UL)`, we will designate this value as (A). + // ---------- + // 0x11111111 + // + 0x11111111 + // 0x11111111 + // 0x1111111 + //----------------- + // 4 The value in the most significant 4-bit nibble is equal to the number of 1s in (A), + // modulo 16. The LSB is 0. + return (v >>> 60) &&& 1; // Return the LSB of the most significant 4-bit nibble. } // Common implementation for XBits and ZBits. From 03fe767ae4a50890950bdab95a6f109c48d69bd7 Mon Sep 17 00:00:00 2001 From: "Stefan J. 
Wernli" Date: Sat, 27 Feb 2021 00:27:03 -0800 Subject: [PATCH 16/30] Clean up ClassicalControl.qs (#541) Minor clean-up for ClassicalControl.qs and associated C# implementation Related to work in Classical Control rewrite support should be refactored #534 --- .../test/QIR-static/qsharp/qir-gen.csproj | 1 - .../QSharpFoundation/ClassicalControl.qs | 26 +-- .../QuantumSimulator/SimulatorBase.cs | 160 ------------------ 3 files changed, 13 insertions(+), 174 deletions(-) diff --git a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj index dc0df356f9f..cafd2d3365a 100644 --- a/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj +++ b/src/QirRuntime/test/QIR-static/qsharp/qir-gen.csproj @@ -3,7 +3,6 @@ Exe netcoreapp3.1 - false True false false diff --git a/src/Simulation/QSharpFoundation/ClassicalControl.qs b/src/Simulation/QSharpFoundation/ClassicalControl.qs index 38d486e8e70..1666d930aa7 100644 --- a/src/Simulation/QSharpFoundation/ClassicalControl.qs +++ b/src/Simulation/QSharpFoundation/ClassicalControl.qs @@ -3,22 +3,22 @@ namespace Microsoft.Quantum.Simulation.QuantumProcessor.Extensions //ToDo: update namespace to a more appropriate name { - operation NoOp() : Unit is Ctl + Adj {} + open Microsoft.Quantum.Canon; // Private helper operations. 
- operation Delay<'T>(op : ('T => Unit), arg : 'T, aux : Unit) : Unit { + internal operation Delay<'T>(op : ('T => Unit), arg : 'T, aux : Unit) : Unit { op(arg); } - operation DelayC<'T>(op : ('T => Unit is Ctl), arg : 'T, aux : Unit) : Unit is Ctl { + internal operation DelayC<'T>(op : ('T => Unit is Ctl), arg : 'T, aux : Unit) : Unit is Ctl { op(arg); } - operation DelayA<'T>(op : ('T => Unit is Adj), arg : 'T, aux : Unit) : Unit is Adj { + internal operation DelayA<'T>(op : ('T => Unit is Adj), arg : 'T, aux : Unit) : Unit is Adj { op(arg); } - operation DelayCA<'T>(op : ('T => Unit is Ctl + Adj), arg : 'T, aux : Unit) : Unit is Ctl + Adj { + internal operation DelayCA<'T>(op : ('T => Unit is Ctl + Adj), arg : 'T, aux : Unit) : Unit is Ctl + Adj { op(arg); } @@ -134,25 +134,25 @@ namespace Microsoft.Quantum.Simulation.QuantumProcessor.Extensions //ToDo: updat // if (measurementResult == Zero) {onResultZeroOp(zeroArg);} operation ApplyIfZero<'T>(measurementResult : Result, (onResultZeroOp : ('T => Unit), zeroArg : 'T)) : Unit { let zeroOp = Delay(onResultZeroOp, zeroArg, _); - let oneOp = Delay(NoOp, (), _); + let oneOp = Delay(NoOp, (), _); ApplyIfElseIntrinsic(measurementResult, zeroOp, oneOp); } operation ApplyIfZeroA<'T>(measurementResult : Result, (onResultZeroOp : ('T => Unit is Adj), zeroArg : 'T)) : Unit is Adj{ let zeroOp = DelayA(onResultZeroOp, zeroArg, _); - let oneOp = DelayA(NoOp, (), _); + let oneOp = DelayA(NoOp, (), _); ApplyIfElseIntrinsicA(measurementResult, zeroOp, oneOp); } operation ApplyIfZeroC<'T>(measurementResult : Result, (onResultZeroOp : ('T => Unit is Ctl), zeroArg : 'T)) : Unit is Ctl { let zeroOp = DelayC(onResultZeroOp, zeroArg, _); - let oneOp = DelayC(NoOp, (), _); + let oneOp = DelayC(NoOp, (), _); ApplyIfElseIntrinsicC(measurementResult, zeroOp, oneOp); } operation ApplyIfZeroCA<'T>(measurementResult : Result, (onResultZeroOp : ('T => Unit is Ctl + Adj), zeroArg : 'T)) : Unit is Ctl + Adj { let zeroOp = DelayCA(onResultZeroOp, 
zeroArg, _); - let oneOp = DelayCA(NoOp, (), _); + let oneOp = DelayCA(NoOp, (), _); ApplyIfElseIntrinsicCA(measurementResult, zeroOp, oneOp); } @@ -162,25 +162,25 @@ namespace Microsoft.Quantum.Simulation.QuantumProcessor.Extensions //ToDo: updat // if (measurementResult == One) {onResultOneOp(oneArg);} operation ApplyIfOne<'T>(measurementResult : Result, (onResultOneOp : ('T => Unit), oneArg : 'T)) : Unit { let oneOp = Delay(onResultOneOp, oneArg, _); - let zeroOp = Delay(NoOp, (), _); + let zeroOp = Delay(NoOp, (), _); ApplyIfElseIntrinsic(measurementResult, zeroOp, oneOp); } operation ApplyIfOneA<'T>(measurementResult : Result, (onResultOneOp : ('T => Unit is Adj), oneArg : 'T)) : Unit is Adj { let oneOp = DelayA(onResultOneOp, oneArg, _); - let zeroOp = DelayA(NoOp, (), _); + let zeroOp = DelayA(NoOp, (), _); ApplyIfElseIntrinsicA(measurementResult, zeroOp, oneOp); } operation ApplyIfOneC<'T>(measurementResult : Result, (onResultOneOp : ('T => Unit is Ctl), oneArg : 'T)) : Unit is Ctl { let oneOp = DelayC(onResultOneOp, oneArg, _); - let zeroOp = DelayC(NoOp, (), _); + let zeroOp = DelayC(NoOp, (), _); ApplyIfElseIntrinsicC(measurementResult, zeroOp, oneOp); } operation ApplyIfOneCA<'T>(measurementResult : Result, (onResultOneOp : ('T => Unit is Ctl + Adj), oneArg : 'T)) : Unit is Ctl + Adj { let oneOp = DelayCA(onResultOneOp, oneArg, _); - let zeroOp = DelayCA(NoOp, (), _); + let zeroOp = DelayCA(NoOp, (), _); ApplyIfElseIntrinsicCA(measurementResult, zeroOp, oneOp); } diff --git a/src/Simulation/Simulators/QuantumSimulator/SimulatorBase.cs b/src/Simulation/Simulators/QuantumSimulator/SimulatorBase.cs index 0d5d5b53824..eb8b27f1ee6 100644 --- a/src/Simulation/Simulators/QuantumSimulator/SimulatorBase.cs +++ b/src/Simulation/Simulators/QuantumSimulator/SimulatorBase.cs @@ -505,86 +505,6 @@ public ApplyIfElse(SimulatorBase m) : base(m) => }; } - public class ApplyIfElseA : ApplyIfElseIntrinsicA - { - protected readonly SimulatorBase sim; - public 
ApplyIfElseA(SimulatorBase m) : base(m) => - sim = m; - - public override Func<(Result, IAdjointable, IAdjointable), QVoid> __Body__ => (q) => - { - (Result measurementResult, ICallable onZero, ICallable onOne) = q; - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, OperationFunctor.Body, null); - return QVoid.Instance; - }; - - public override Func<(Result, IAdjointable, IAdjointable), QVoid> __AdjointBody__ => (q) => - { - (Result measurementResult, ICallable onZero, ICallable onOne) = q; - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, OperationFunctor.Adjoint, null); - return QVoid.Instance; - }; - } - - public class ApplyIfElseC : ApplyIfElseIntrinsicC - { - protected readonly SimulatorBase sim; - public ApplyIfElseC(SimulatorBase m) : base(m) => - sim = m; - - public override Func<(Result, IControllable, IControllable), QVoid> __Body__ => (q) => - { - (Result measurementResult, ICallable onZero, ICallable onOne) = q; - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, OperationFunctor.Body, null); - return QVoid.Instance; - }; - - public override Func<(IQArray, (Result, IControllable, IControllable)), QVoid> __ControlledBody__ => (q) => - { - (IQArray ctrls, (Result measurementResult, ICallable onZero, ICallable onOne)) = q; - (var specKind, IQArray? controls) = ctrls?.Count == 0 ? 
(OperationFunctor.Body, null) : (OperationFunctor.Controlled, ctrls); - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, specKind, controls); - return QVoid.Instance; - }; - } - - public class ApplyIfElseCA : ApplyIfElseIntrinsicCA - { - protected readonly SimulatorBase sim; - public ApplyIfElseCA(SimulatorBase m) : base(m) => - sim = m; - - public override Func<(Result, IUnitary, IUnitary), QVoid> __Body__ => (q) => - { - (Result measurementResult, ICallable onZero, ICallable onOne) = q; - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, OperationFunctor.Body, null); - return QVoid.Instance; - }; - - public override Func<(Result, IUnitary, IUnitary), QVoid> __AdjointBody__ => (q) => - { - (Result measurementResult, ICallable onZero, ICallable onOne) = q; - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, OperationFunctor.Adjoint, null); - return QVoid.Instance; - }; - - public override Func<(IQArray, (Result, IUnitary, IUnitary)), QVoid> __ControlledBody__ => (q) => - { - (IQArray ctrls, (Result measurementResult, ICallable onZero, ICallable onOne)) = q; - (var specKind, IQArray? controls) = ctrls?.Count == 0 ? (OperationFunctor.Body, null) : (OperationFunctor.Controlled, ctrls); - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, specKind, controls); - return QVoid.Instance; - }; - - public override Func<(IQArray, (Result, IUnitary, IUnitary)), QVoid> __ControlledAdjointBody__ => (q) => - { - (IQArray ctrls, (Result measurementResult, ICallable onZero, ICallable onOne)) = q; - (var specKind, IQArray? controls) = ctrls?.Count == 0 ? 
(OperationFunctor.Adjoint, null) : (OperationFunctor.ControlledAdjoint, ctrls); - this.sim.BranchingBasedOnMeasurement(measurementResult, Result.Zero, onZero, onOne, specKind, controls); - return QVoid.Instance; - }; - } - public class ApplyConditionally : ApplyConditionallyIntrinsic { protected readonly SimulatorBase sim; @@ -599,86 +519,6 @@ public ApplyConditionally(SimulatorBase m) : base(m) => }; } - public class ApplyConditionallyA : ApplyConditionallyIntrinsicA - { - protected readonly SimulatorBase sim; - public ApplyConditionallyA(SimulatorBase m) : base(m) => - sim = m; - - public override Func<(IQArray, IQArray, IAdjointable, IAdjointable), QVoid> __Body__ => (q) => - { - (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp) = q; - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, OperationFunctor.Body, null); - return QVoid.Instance; - }; - - public override Func<(IQArray, IQArray, IAdjointable, IAdjointable), QVoid> __AdjointBody__ => (q) => - { - (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp) = q; - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, OperationFunctor.Adjoint, null); - return QVoid.Instance; - }; - } - - public class ApplyConditionallyC : ApplyConditionallyIntrinsicC - { - protected readonly SimulatorBase sim; - public ApplyConditionallyC(SimulatorBase m) : base(m) => - sim = m; - - public override Func<(IQArray, IQArray, IControllable, IControllable), QVoid> __Body__ => (q) => - { - (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp) = q; - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, OperationFunctor.Body, null); - return QVoid.Instance; - }; - - public override Func<(IQArray, (IQArray, IQArray, IControllable, IControllable)), QVoid> __ControlledBody__ => 
(q) => - { - (IQArray ctrls, (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp)) = q; - (var specKind, IQArray? controls) = ctrls?.Count == 0 ? (OperationFunctor.Body, null) : (OperationFunctor.Controlled, ctrls); - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, specKind, controls); - return QVoid.Instance; - }; - } - - public class ApplyConditionallyCA : ApplyConditionallyIntrinsicCA - { - protected readonly SimulatorBase sim; - public ApplyConditionallyCA(SimulatorBase m) : base(m) => - sim = m; - - public override Func<(IQArray, IQArray, IUnitary, IUnitary), QVoid> __Body__ => (q) => - { - (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp) = q; - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, OperationFunctor.Body, null); - return QVoid.Instance; - }; - - public override Func<(IQArray, IQArray, IUnitary, IUnitary), QVoid> __AdjointBody__ => (q) => - { - (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp) = q; - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, OperationFunctor.Adjoint, null); - return QVoid.Instance; - }; - - public override Func<(IQArray, (IQArray, IQArray, IUnitary, IUnitary)), QVoid> __ControlledBody__ => (q) => - { - (IQArray ctrls, (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp)) = q; - (var specKind, IQArray? controls) = ctrls?.Count == 0 ? 
(OperationFunctor.Body, null) : (OperationFunctor.Controlled, ctrls); - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, specKind, controls); - return QVoid.Instance; - }; - - public override Func<(IQArray, (IQArray, IQArray, IUnitary, IUnitary)), QVoid> __ControlledAdjointBody__ => (q) => - { - (IQArray ctrls, (IQArray measurementResults, IQArray resultsValues, ICallable onEqualOp, ICallable onNonEqualOp)) = q; - (var specKind, IQArray? controls) = ctrls?.Count == 0 ? (OperationFunctor.Adjoint, null) : (OperationFunctor.ControlledAdjoint, ctrls); - this.sim.BranchingBasedOnMeasurement(measurementResults, resultsValues, onEqualOp, onNonEqualOp, specKind, controls); - return QVoid.Instance; - }; - } - private Action BuildClause(ICallable op, OperationFunctor type, IQArray? ctrls) => type switch { From 78c7cd7d13762fc8482e09c44690a9032e07b348 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Mon, 1 Mar 2021 12:14:26 -0800 Subject: [PATCH 17/30] Tracer: Support of conditionals on measurement results (#539) --- src/QirRuntime/lib/QIR/callables.cpp | 8 + src/QirRuntime/lib/QIR/conditionals.cpp | 15 +- src/QirRuntime/lib/Tracer/README.md | 26 +- src/QirRuntime/lib/Tracer/tracer-bridge.ll | 17 + src/QirRuntime/lib/Tracer/tracer-qis.cpp | 14 + src/QirRuntime/lib/Tracer/tracer.cpp | 120 +- src/QirRuntime/lib/Tracer/tracer.hpp | 38 +- src/QirRuntime/public/QirTypes.hpp | 1 + src/QirRuntime/public/TracerTypes.hpp | 5 +- .../test/QIR-tracer/qir-tracer-driver.cpp | 14 +- .../test/QIR-tracer/tracer-conditionals.qs | 25 + .../test/QIR-tracer/tracer-config.cpp | 4 +- .../test/QIR-tracer/tracer-config.hpp | 2 +- .../test/QIR-tracer/tracer-measurements.qs | 27 - src/QirRuntime/test/QIR-tracer/tracer-qir.ll | 2392 ++++++++++------- .../test/QIR-tracer/tracer-target.qs | 24 +- src/QirRuntime/test/unittests/TracerTests.cpp | 190 +- 17 files changed, 1878 insertions(+), 1044 deletions(-) 
create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-conditionals.qs delete mode 100644 src/QirRuntime/test/QIR-tracer/tracer-measurements.qs diff --git a/src/QirRuntime/lib/QIR/callables.cpp b/src/QirRuntime/lib/QIR/callables.cpp index b1fb38cc51c..e354d55e959 100644 --- a/src/QirRuntime/lib/QIR/callables.cpp +++ b/src/QirRuntime/lib/QIR/callables.cpp @@ -406,6 +406,14 @@ void QirCallable::Invoke(PTuple args, PTuple result) } } +void QirCallable::Invoke() +{ + assert((this->appliedFunctor & QirCallable::Controlled) == 0 && "Cannot invoke controlled callable without args"); + PTuple args = quantum__rt__tuple_create(0); + this->Invoke(args, nullptr); + quantum__rt__tuple_update_reference_count(args, -1); +} + // A + A = I; A + C = C + A = CA; C + C = C; CA + A = C; CA + C = CA void QirCallable::ApplyFunctor(int functor) { diff --git a/src/QirRuntime/lib/QIR/conditionals.cpp b/src/QirRuntime/lib/QIR/conditionals.cpp index 72c46e21db6..e9e0bb8d083 100644 --- a/src/QirRuntime/lib/QIR/conditionals.cpp +++ b/src/QirRuntime/lib/QIR/conditionals.cpp @@ -9,13 +9,6 @@ #include "QirTypes.hpp" #include "quantum__rt.hpp" -static void Apply(QirCallable* clb) -{ - PTuple argsTuple = quantum__rt__tuple_create(0); - quantum__rt__callable_invoke(clb, argsTuple /*args*/, nullptr /*result*/); - quantum__rt__tuple_update_reference_count(argsTuple, -1); -} - static bool ArraysContainEqualResults(QirArray* rs1, QirArray* rs2) { assert(rs1 != nullptr && rs2 != nullptr && rs1->count == rs2->count); @@ -38,8 +31,8 @@ extern "C" { void quantum__qis__applyifelseintrinsic__body(RESULT* r, QirCallable* clbOnZero, QirCallable* clbOnOne) { - QirCallable* clbApply = quantum__rt__result_equal(r, quantum__rt__result_zero()) ? clbOnZero : clbOnOne; - Apply(clbApply); + QirCallable* clb = quantum__rt__result_equal(r, quantum__rt__result_zero()) ? 
clbOnZero : clbOnOne; + clb->Invoke(); } void quantum__qis__applyconditionallyintrinsic__body( @@ -48,7 +41,7 @@ extern "C" QirCallable* clbOnAllEqual, QirCallable* clbOnSomeDifferent) { - QirCallable* clbApply = ArraysContainEqualResults(rs1, rs2) ? clbOnAllEqual : clbOnSomeDifferent; - Apply(clbApply); + QirCallable* clb = ArraysContainEqualResults(rs1, rs2) ? clbOnAllEqual : clbOnSomeDifferent; + clb->Invoke(); } } \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 3ba42c9ca35..43993131345 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -72,20 +72,24 @@ As the tracer is executing a sequential quantum program, it will compute a time using the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might be done differently, as long as the resulting layering is the same as if running the conceptual algorithm. -A ___barrier___ is a layer that acts as if it was containing all currently allocated qubits and no operation can be added - into it. - -A user can inject _barriers_ by calling `__quantum__qis__global_barrier` function. The user can choose duration of - a barrier which would affect start time of the following layers but no operations will be added to a barrier, - independent of its duration. +A layer _L(T,N)_ acts as a ___fence___ if it does _not_ accepts any new operations, even if these operations don't + involve qubits from _Qubits(T,N)_. __Conditional execution on measurement results__: The Tracer will execute LLVM IR's branching structures "as is", depending on the values of the corresponding variables at runtime. To enable estimation of branches that depend on a - measurement result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the - conditionals into corresponding callbacks to the tracer. 
The tracer will add operations from _both branches_ into the - layers it creates to compute the upper bound estimate. + measurement result, the source Q# program must be authored in such a way that the Q# compiler will translate the + conditionals into corresponding callbacks to the tracer (`__quantum__qis__apply_conditionally`). The tracer will + execute _both branches_ of the conditional statement to compute the upper bound estimate. The conditional callbacks + will mark the layers that contain measurements that produced the results used in conditionals as _fences_ for the + duration of the conditional callback. + +A user can create special layers that act as permanent _fences_ by calling `__quantum__qis__inject_barrier` function. The + user can choose duration of a barrier which would affect start time of the following layers but no operations will be + added to a barrier, independent of its duration. _Terminology note_: 'fence' is a role of layer, which might be assigned + to a layer temporarily or permanently; 'barrier' is a special layer the user can inject that has the role of a permanent + fence and contains no operations. -The following operations are _not_ supported inside conditional callbacks and would cause a runtime failure: +__TODO__: figure out which operations should or should _not_ be supported inside conditional callbacks. For example: - nested conditional callbacks; - measurements; @@ -201,7 +205,7 @@ TBD but lower priority. | `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user can assign different operation ids for different measurement bases. | | `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user can assign different operation ids for different measurement bases. 
| | `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. | -| TODO: handling of conditionals on measurement results | | +| `void __quantum__qis__apply_conditionally(%Array* %.rs1, %Array* %.rs2, %Callable* %.clb_on_equal, %Callable* %.clb_on_different)` | The first two arguments contain arrays of results to be compared pairwise. The third argument is a callable that represents the branch that would be executed if all results compared equal and the forth argument is a callable that represents the branch that would be executed if any of the results compared different. The tracer executes _both_ branches.| _Note on operation ids_: The user is responsible for using operation ids in a consistent manner. Operations with the same id will be counted by the tracer as the _same_ operation, even accross invocations with different number of target diff --git a/src/QirRuntime/lib/Tracer/tracer-bridge.ll b/src/QirRuntime/lib/Tracer/tracer-bridge.ll index 1f0fa216c76..740ad7970f9 100644 --- a/src/QirRuntime/lib/Tracer/tracer-bridge.ll +++ b/src/QirRuntime/lib/Tracer/tracer-bridge.ll @@ -5,6 +5,7 @@ ; QIR types ; %Array = type opaque +%Callable = type opaque %Qubit = type opaque %Result = type opaque @@ -15,6 +16,7 @@ %class.QUBIT = type opaque %class.RESULT = type opaque %struct.QirArray = type opaque +%struct.QirCallable = type opaque ;=============================================================================== @@ -28,6 +30,8 @@ declare void @quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.Q declare void @quantum__qis__inject_barrier(i32 %id, i32 %duration) declare %class.RESULT* @quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT*) declare %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray*) +declare void @quantum__qis__apply_conditionally( + %struct.QirArray*, %struct.QirArray*, %struct.QirCallable*, %struct.QirCallable*) 
;=============================================================================== ; quantum__trc namespace implementations @@ -82,4 +86,17 @@ define %Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* % %r = call %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray* %qs) %.r = bitcast %class.RESULT* %r to %Result* ret %Result* %.r +} + +define void @__quantum__qis__apply_conditionally( + %Array* %.rs1, %Array* %.rs2, %Callable* %.clb_on_equal, %Callable* %.clb_on_different) { + + %rs1 = bitcast %Array* %.rs1 to %struct.QirArray* + %rs2 = bitcast %Array* %.rs2 to %struct.QirArray* + %clb_on_equal = bitcast %Callable* %.clb_on_equal to %struct.QirCallable* + %clb_on_different = bitcast %Callable* %.clb_on_different to %struct.QirCallable* + call void @quantum__qis__apply_conditionally( + %struct.QirArray* %rs1, %struct.QirArray* %rs2, + %struct.QirCallable* %clb_on_equal, %struct.QirCallable* %clb_on_different) + ret void } \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer-qis.cpp b/src/QirRuntime/lib/Tracer/tracer-qis.cpp index ed7a5eeb370..dd608602215 100644 --- a/src/QirRuntime/lib/Tracer/tracer-qis.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-qis.cpp @@ -63,4 +63,18 @@ extern "C" { return tracer->TraceMultiQubitMeasurement(id, duration, qs->count, reinterpret_cast(qs->buffer)); } + + void quantum__qis__apply_conditionally( // NOLINT + QirArray* rs1, + QirArray* rs2, + QirCallable* clbOnAllEqual, + QirCallable* clbOnSomeDifferent) + { + CTracer::FenceScope sf( + tracer.get(), rs1->count, reinterpret_cast(rs1->buffer), rs2->count, + reinterpret_cast(rs2->buffer)); + + clbOnAllEqual->Invoke(); + clbOnSomeDifferent->Invoke(); + } } \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index c9e906f4a29..02afaeafb4d 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -1,6 +1,7 @@ // 
Copyright (c) Microsoft Corporation. // Licensed under the MIT License. +#include #include #include #include @@ -19,12 +20,22 @@ namespace Quantum tracer = std::make_shared(preferredLayerDuration); return tracer; } - std::shared_ptr CreateTracer(int preferredLayerDuration, const std::unordered_map& opNames) + std::shared_ptr CreateTracer( + int preferredLayerDuration, + const std::unordered_map& opNames) { tracer = std::make_shared(preferredLayerDuration, opNames); return tracer; } + //------------------------------------------------------------------------------------------------------------------ + // CTracer::LaterLayerOf + //------------------------------------------------------------------------------------------------------------------ + /*static*/ LayerId CTracer::LaterLayerOf(LayerId l1, LayerId l2) + { + return std::max(l1, l2); + } + //------------------------------------------------------------------------------------------------------------------ // CTracer's ISimulator implementation //------------------------------------------------------------------------------------------------------------------ @@ -47,7 +58,8 @@ namespace Quantum const QubitState& qstate = this->UseQubit(q); stringstream str(std::to_string(qubitIndex)); - str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroDurationOps.size() << ")"; + str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroDurationOps.size() + << ")"; return str.str(); } @@ -81,11 +93,19 @@ namespace Quantum layerStartTime = lastLayer.startTime + lastLayer.duration; } this->metricsByLayer.emplace_back( - Layer {layerStartTime, max(this->preferredLayerDuration, minRequiredDuration)}); + Layer{layerStartTime, max(this->preferredLayerDuration, minRequiredDuration)}); return this->metricsByLayer.size() - 1; } + //------------------------------------------------------------------------------------------------------------------ + // 
CTracer::GetEffectiveFence + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::GetEffectiveFence() const + { + return CTracer::LaterLayerOf(this->globalBarrier, this->latestConditionalFence); + } + //------------------------------------------------------------------------------------------------------------------ // CTracer::FindLayerToInsertOperationInto //------------------------------------------------------------------------------------------------------------------ @@ -93,16 +113,17 @@ namespace Quantum { const QubitState& qstate = this->UseQubit(q); - LayerId layerToInsertInto = INVALID; + LayerId layerToInsertInto = REQUESTNEW; + const LayerId barrier = this->GetEffectiveFence(); const LayerId firstLayerAfterBarrier = - this->globalBarrier == INVALID - ? this->metricsByLayer.empty() ? INVALID : 0 - : this->globalBarrier + 1 == this->metricsByLayer.size() ? INVALID : this->globalBarrier + 1; + (barrier == INVALID ? (this->metricsByLayer.empty() ? REQUESTNEW : 0) + : ((barrier + 1 == this->metricsByLayer.size()) ? 
REQUESTNEW : barrier + 1)); - LayerId candidate = max(qstate.layer, firstLayerAfterBarrier); + LayerId candidate = CTracer::LaterLayerOf(qstate.layer, firstLayerAfterBarrier); + assert(candidate != INVALID); - if (candidate != INVALID) + if (candidate != REQUESTNEW) { // Find the earliest layer that the operation fits in by duration const Layer& candidateLayer = this->metricsByLayer[candidate]; @@ -123,10 +144,6 @@ namespace Quantum } } } - else if (opDuration <= this->preferredLayerDuration) - { - layerToInsertInto = firstLayerAfterBarrier; - } return layerToInsertInto; } @@ -137,6 +154,8 @@ namespace Quantum void CTracer::AddOperationToLayer(OpId id, LayerId layer) { assert(layer < this->metricsByLayer.size()); + assert(this->metricsByLayer[layer].barrierId == -1 && "Should not add operations to barriers"); + this->metricsByLayer[layer].operations[id] += 1; } @@ -166,8 +185,8 @@ namespace Quantum this->seenOps.insert(id); QubitState& qstate = this->UseQubit(target); - if (opDuration == 0 && - (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier))) + const LayerId barrier = this->GetEffectiveFence(); + if (opDuration == 0 && (qstate.layer == INVALID || (barrier != INVALID && qstate.layer < barrier))) { qstate.pendingZeroDurationOps.push_back(id); return INVALID; @@ -175,7 +194,7 @@ namespace Quantum // Figure out the layer this operation should go into. 
LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration); - if (layerToInsertInto == INVALID) + if (layerToInsertInto == REQUESTNEW) { layerToInsertInto = this->CreateNewLayer(opDuration); } @@ -188,7 +207,7 @@ namespace Quantum } //------------------------------------------------------------------------------------------------------------------ - // CTracer::TraceControlledSingleQubitOp + // CTracer::TraceMultiQubitOp //------------------------------------------------------------------------------------------------------------------ LayerId CTracer::TraceMultiQubitOp( OpId id, @@ -213,16 +232,16 @@ namespace Quantum // Figure out the layer this operation should go into. LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(secondGroup[0], opDuration); - for (long i = 1; i < nSecondGroup && layerToInsertInto != INVALID; i++) + for (long i = 1; i < nSecondGroup && layerToInsertInto != REQUESTNEW; i++) { layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(secondGroup[i], opDuration)); } - for (long i = 0; i < nFirstGroup && layerToInsertInto != INVALID; i++) + for (long i = 0; i < nFirstGroup && layerToInsertInto != REQUESTNEW; i++) { layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(firstGroup[i], opDuration)); } - if (layerToInsertInto == INVALID) + if (layerToInsertInto == REQUESTNEW) { layerToInsertInto = this->CreateNewLayer(opDuration); } @@ -243,6 +262,9 @@ namespace Quantum return layerToInsertInto; } + //------------------------------------------------------------------------------------------------------------------ + // CTracer::InjectGlobalBarrier + //------------------------------------------------------------------------------------------------------------------ LayerId CTracer::InjectGlobalBarrier(OpId id, Duration duration) { LayerId layer = this->CreateNewLayer(duration); @@ -251,18 +273,76 @@ namespace Quantum return layer; } + 
//------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceSingleQubitMeasurement + //------------------------------------------------------------------------------------------------------------------ Result CTracer::TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target) { LayerId layerId = this->TraceSingleQubitOp(id, duration, target); return reinterpret_cast(layerId); } + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceMultiQubitMeasurement + //------------------------------------------------------------------------------------------------------------------ Result CTracer::TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets) { LayerId layerId = this->TraceMultiQubitOp(id, duration, 0, nullptr, nTargets, targets); return reinterpret_cast(layerId); } + //------------------------------------------------------------------------------------------------------------------ + // CTracer::FindLatestMeasurementLayer + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::FindLatestMeasurementLayer(long count, Result* results) + { + LayerId latest = INVALID; + for (long i = 0; i < count; i++) + { + const LayerId id = this->GetLayerIdOfSourceMeasurement(results[i]); + latest = CTracer::LaterLayerOf(latest, id); + } + return latest; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::FenceScope + //------------------------------------------------------------------------------------------------------------------ + CTracer::FenceScope::FenceScope(CTracer* tracer, long count1, Result* rs1, long count2, Result* rs2) + : tracer(tracer) + { + const LayerId fence1 = + (rs1 != nullptr && count1 > 0) ? 
this->tracer->FindLatestMeasurementLayer(count1, rs1) : INVALID; + const LayerId fence2 = + (rs2 != nullptr && count2 > 0) ? this->tracer->FindLatestMeasurementLayer(count2, rs2) : INVALID; + + this->fence = CTracer::LaterLayerOf(fence1, fence2); + if (this->fence == INVALID) + { + return; + } + assert(this->fence < this->tracer->metricsByLayer.size()); + + this->tracer->conditionalFences.push_back(this->fence); + this->tracer->latestConditionalFence = CTracer::LaterLayerOf(this->tracer->latestConditionalFence, this->fence); + } + CTracer::FenceScope::~FenceScope() + { + if (this->fence == INVALID) + { + return; + } + + vector& fences = this->tracer->conditionalFences; + assert(!fences.empty()); + this->tracer->conditionalFences.pop_back(); + + // Update the latest layer (we expect the stack of fences to be shallow so a linear search through it + // should be OK). + this->tracer->latestConditionalFence = + fences.empty() ? INVALID : *std::max_element(fences.begin(), fences.end()); + } + //------------------------------------------------------------------------------------------------------------------ // CTracer::PrintLayerMetrics //------------------------------------------------------------------------------------------------------------------ diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp index c5548e4bd18..508d525e68d 100644 --- a/src/QirRuntime/lib/Tracer/tracer.hpp +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -9,8 +9,8 @@ #include #include "CoreTypes.hpp" -#include "TracerTypes.hpp" #include "QuantumApi_I.hpp" +#include "TracerTypes.hpp" namespace Microsoft { @@ -71,10 +71,21 @@ namespace Quantum // The index into the vector is treated as implicit id of the layer. std::vector metricsByLayer; - // The last global barrier, injected by the user. No new operations can be added to the barrier or to any of the - // layer that preceeded it, even if the new operations involve completely new qubits. 
+ // The last barrier, injected by the user. No new operations can be added to the barrier or to any of the + // layers that preceded it, even if the new operations involve completely new qubits. Thus, the barriers act + // as permanent fences that are activated at the moment the tracer executes the corresponding user code and are + // never removed. LayerId globalBarrier = INVALID; + // The conditional fences are layers that contain measurements for results used to guard conditional branches. + // The set of fences is a stack (for nested conditionals) but we use vector to store them so we can recalculate + // the latest (by time) fence when the stack is popped. + std::vector conditionalFences; + + // We don't expect the stack of conditional fences to be deep, so it's OK to recalculate the latest layer when + // the stack is modified. + LayerId latestConditionalFence = INVALID; + // Mapping of operation ids to user-chosen names, for operations that user didn't name, the output will use // operation ids. std::unordered_map opNames; @@ -96,7 +107,7 @@ return this->qubits[qubitIndex]; } - // If no appropriate layer found, return `INVALID` + // If no appropriate layer found, returns `REQUESTNEW`. LayerId FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const; // Returns the index of the created layer. @@ -105,10 +116,19 @@ // Adds operation with given id into the given layer. Assumes that duration contraints have been satisfied. void AddOperationToLayer(OpId id, LayerId layer); - // Update the qubit state with the new layer information + // Update the qubit state with the new layer information. void UpdateQubitState(Qubit q, LayerId layer, Duration opDuration); + // Considers global barriers and conditional fences to find the fence currently in effect. + LayerId GetEffectiveFence() const; + + // For the given results finds the latest layer of the measurements that produced the results.
+ LayerId FindLatestMeasurementLayer(long count, Result* results); + public: + // Returns the later layer of the two. INVALID LayerId is treated as -Infinity, and REQUESTNEW -- as +Infinity. + static LayerId LaterLayerOf(LayerId l1, LayerId l2); + explicit CTracer(int preferredLayerDuration) : preferredLayerDuration(preferredLayerDuration) { @@ -184,6 +204,14 @@ namespace Quantum // ------------------------------------------------------------------------------------------------------------- LayerId InjectGlobalBarrier(OpId id, Duration duration); + struct FenceScope + { + CTracer* tracer = nullptr; + LayerId fence = INVALID; + explicit FenceScope(CTracer* tracer, long count1, Result* results1, long count2, Result* results2); + ~FenceScope(); + }; + // ------------------------------------------------------------------------------------------------------------- // Configuring the tracer and getting data back from it. // ------------------------------------------------------------------------------------------------------------- diff --git a/src/QirRuntime/public/QirTypes.hpp b/src/QirRuntime/public/QirTypes.hpp index a9ca03fa72f..dc0313995dc 100644 --- a/src/QirRuntime/public/QirTypes.hpp +++ b/src/QirRuntime/public/QirTypes.hpp @@ -166,6 +166,7 @@ struct QirCallable void UpdateAliasCount(int increment); void Invoke(PTuple args, PTuple result); + void Invoke(); // a shortcut to invoke a callable with no arguments and Unit result void ApplyFunctor(int functor); void InvokeCaptureCallback(int index, int64_t parameter); diff --git a/src/QirRuntime/public/TracerTypes.hpp b/src/QirRuntime/public/TracerTypes.hpp index 9b7d242f9f7..77e4565e974 100644 --- a/src/QirRuntime/public/TracerTypes.hpp +++ b/src/QirRuntime/public/TracerTypes.hpp @@ -10,8 +10,9 @@ namespace Quantum using OpId = int; using Time = int; using Duration = int; - using LayerId = size_t; + using LayerId = int64_t; - constexpr LayerId INVALID = std::numeric_limits::max(); + constexpr LayerId INVALID = 
std::numeric_limits::min(); + constexpr LayerId REQUESTNEW = std::numeric_limits::max(); } } \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index ba2fd6e361a..09199fe5782 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -29,19 +29,21 @@ TEST_CASE("Invoke each intrinsic from Q# core once", "[qir-tracer]") tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); INFO(out.str()); - // TestCoreIntrinsics happens to produce 24 layers right now and we are not checking whether that's expected -- as + // TestCoreIntrinsics happens to produce 24 layers right now and we are not checking whether that's expected -- as // testing of layering logic is better done by unit tests. CHECK(layers.size() == 24); } -TEST_CASE("Measurements can be counted but cannot be compared", "[qir-tracer]") +TEST_CASE("Conditional execution on measurement result", "[qir-tracer]") { shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); - REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(false /*compare*/)); - CHECK(tr->UseLayers().size() == 1); + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body()); - REQUIRE_THROWS(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(true /*compare*/)); + std::stringstream out; + tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); + INFO(out.str()); + CHECK(tr->UseLayers().size() == 5); } -} \ No newline at end of file +} // namespace TracerUser \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-conditionals.qs b/src/QirRuntime/test/QIR-tracer/tracer-conditionals.qs new file mode 100644 index 00000000000..e9c0c512421 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-conditionals.qs @@ -0,0 +1,25 @@ +// 
Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.Tracer { + open Microsoft.Quantum.Intrinsic; + + // Private helper operations. + operation Delay(op : (Qubit => Unit), arg : Qubit, aux : Unit) : Unit { + op(arg); + } + + operation TestMeasurements() : Unit { + use qs = Qubit[6]; + T(qs[0]); // layer 0 + let r0 = M(qs[0]); // layer 1 + T(qs[1]); // layer 0 + CNOT(qs[1], qs[2]); // layer 1 + let qs12 = [qs[1], qs[2]]; + let r12 = Measure([PauliY, PauliX], qs12); // layer 2 + + ApplyIfElseIntrinsic(r0, Delay(X, qs[3], _), Delay(Y, qs[3], _)); // layers 2, 3 + ApplyIfElseIntrinsic(r12, Delay(Z, qs[4], _), Delay(S, qs[4], _)); // layer 3, 4 + Rx(4.2, qs[5]); // layer 0 + } +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp index eab02e878af..b47f1c70ba9 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp @@ -12,5 +12,7 @@ namespace TracerUser { const std::unordered_map g_operationNames = { - {0, "X"}, {1, "CX"}, {2, "MCX"}, {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/}; + {0, "X"}, {1, "CX"}, {2, "MCX"}, {3, "Y"}, {4, "CY"}, {5, "MCY"}, {6, "Z"}, + {7, "CZ"}, {8, "MCZ"}, {19, "Rx"}, {20, "MCRx"}, {21, "Ry"}, {22, "MCRy"}, {23, "Rz"}, + {24, "MCRz"}, {9, "H"}, {10, "MCH"}, {11, "T"}, {12, "MCT"}, {15, "S"}, {16, "MCS"} /*etc.*/}; } diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp index 8163231286d..ac7eef038d3 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp @@ -17,4 +17,4 @@ extern const std::unordered_map g_operati // Available function in generated QIR extern "C" void Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body(); // NOLINT -extern "C" void Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(bool 
compare); // NOLINT +extern "C" void Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(); // NOLINT diff --git a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs deleted file mode 100644 index 7c4aab5eea1..00000000000 --- a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.Testing.Tracer { - open Microsoft.Quantum.Intrinsic; - - operation Fixup(qs : Qubit[]) : Unit { - for i in 0..Length(qs)-1 { - X(qs[i]); - } - } - - operation TestMeasurements(compare : Bool) : Unit { - use qs = Qubit[3]; - let r0 = M(qs[0]); - let qs12 = [qs[1], qs[2]]; - let r12 = Measure([PauliY, PauliX], qs12); - - if compare { - if r0 == Zero { - X(qs[1]); - } - - //ApplyIfOne(r12, (Fixup, qs12)); - } - } -} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll index 3da1a0f7fa6..edf24b70e7c 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -1,13 +1,10 @@ -;This file was generated using: -;commit 722ec70a97b65f8d3ee1085368142a91183969db (HEAD, origin/swernli/standalone-llvm-2) -;Author: Stefan J. 
Wernli -;Date: Wed Feb 24 21:51:15 2021 -0800 %Result = type opaque %Range = type { i64, i64, i64 } +%Tuple = type opaque %Array = type opaque +%Callable = type opaque %Qubit = type opaque -%Tuple = type opaque %String = type opaque @ResultZero = external global %Result* @@ -17,476 +14,280 @@ @PauliY = constant i2 -1 @PauliZ = constant i2 -2 @EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } +@Microsoft__Quantum__Testing__Tracer__Delay = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__Tracer__Delay__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__X = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] +@Microsoft__Quantum__Intrinsic__Y = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Y__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Y__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Y__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Intrinsic__Y__ctladj__wrapper] +@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Z = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper] +@PartialApplication__3 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__S = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@PartialApplication__4 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] + +define void @Microsoft__Quantum__Intrinsic__ApplyConditionallyIntrinsic__body(%Array* 
%measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i64 1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onEqualOp, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i64 1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onNonEqualOp, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i64 1) + call void @__quantum__qis__apply_conditionally(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) + call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onEqualOp, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onNonEqualOp, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i64 -1) + ret void +} + +declare void @__quantum__rt__array_update_alias_count(%Array*, i64) + +declare void @__quantum__rt__callable_memory_management(i32, %Callable*, i64) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i64) -define void @Microsoft__Quantum__Testing__Tracer__Fixup__body(%Array* %qs) { +declare void @__quantum__qis__apply_conditionally(%Array*, %Array*, %Callable*, %Callable*) + +define void @Microsoft__Quantum__Intrinsic__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call 
i64 @__quantum__rt__array_get_size_1d(%Array* %qs) - %1 = sub i64 %0, 1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onResultZeroOp, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i64 1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onResultOneOp, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i64 1) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Result** + store %Result* %measurementResult, %Result** %2 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Result** + %6 = load %Result*, %Result** @ResultZero + store %Result* %6, %Result** %5 + call void @__quantum__qis__apply_conditionally(%Array* %0, %Array* %3, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) + call void @__quantum__rt__result_update_reference_count(%Result* %measurementResult, i64 1) + call void @__quantum__rt__result_update_reference_count(%Result* %6, i64 1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onResultZeroOp, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %onResultOneOp, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i64 -1) br label %header__1 header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] - %2 = icmp sle i64 %i, %1 - br i1 %2, label %body__1, label %exit__1 + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, 0 + br i1 %8, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %3 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %i) - %4 = bitcast i8* %3 to %Qubit** - %qb = load %Qubit*, %Qubit** %4 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %7) + %10 = bitcast i8* %9 to %Result** + %11 = load %Result*, %Result** %10 + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) br label %exiting__1 exiting__1: ; preds = %body__1 - %5 = add i64 %i, 1 + %12 = add i64 %7, 1 br label %header__1 exit__1: ; preds = %header__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i64 -1) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %14 = icmp sle i64 %13, 0 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %13) + %16 = bitcast i8* %15 to %Result** + %17 = load %Result*, %Result** %16 + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %3, i64 -1) ret void } -declare void @__quantum__rt__array_update_alias_count(%Array*, i64) - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) +declare %Array* @__quantum__rt__array_create_1d(i32, i64) declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) -declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) -define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + +define void 
@Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qb = load %Qubit*, %Qubit** %1 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %qb__1 = load %Qubit*, %Qubit** %3 - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %5 = bitcast i8* %4 to %Qubit** - %qb__2 = load %Qubit*, %Qubit** %5 - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %7 = bitcast i8* %6 to %Qubit** - %qb__3 = load %Qubit*, %Qubit** %7 - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) - %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %9 = bitcast i8* %8 to %Qubit** - %10 = load %Qubit*, %Qubit** %9 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %12 = bitcast i8* %11 to %Qubit** - %13 = load %Qubit*, %Qubit** %12 - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %15 = bitcast i8* %14 to %Qubit** - %qb__4 = load %Qubit*, %Qubit** %15 - call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) - %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %17 = bitcast i8* %16 to %Qubit** - %qb__5 = load %Qubit*, %Qubit** %17 - call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) - %18 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %19 = bitcast i8* %18 to %Qubit** - %qb__6 = load %Qubit*, %Qubit** %19 - call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %21 = bitcast i8* %20 to %Qubit** - %qb__7 = load %Qubit*, %Qubit** %21 - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %23 = bitcast i8* %22 to %Qubit** - %qb__9 = load %Qubit*, %Qubit** %23 - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) - call void @__quantum__qis__inject_barrier(i64 42, i64 0) - %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %25 = bitcast i8* %24 to %Qubit** - %qb__11 = load %Qubit*, %Qubit** %25 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %27 = bitcast i8* %26 to %Qubit** - %qb__13 = load %Qubit*, %Qubit** %27 - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__13) - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %29 = bitcast i8* %28 to %Qubit** - %qb__15 = load %Qubit*, %Qubit** %29 - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__15) - %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %31 = bitcast i8* %30 to %Qubit** - %qb__17 = load %Qubit*, %Qubit** %31 - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__17) - %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %33 = bitcast i8* %32 to %Qubit** - %34 = load %Qubit*, %Qubit** %33 - %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %36 = bitcast i8* %35 to %Qubit** - %37 = load %Qubit*, %Qubit** %36 - call void 
@Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %39 = bitcast i8* %38 to %Qubit** - %qb__19 = load %Qubit*, %Qubit** %39 - call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__19) - %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %41 = bitcast i8* %40 to %Qubit** - %qb__20 = load %Qubit*, %Qubit** %41 - call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__20) - %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %43 = bitcast i8* %42 to %Qubit** - %qb__21 = load %Qubit*, %Qubit** %43 - call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb__21) - %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %45 = bitcast i8* %44 to %Qubit** - %qb__22 = load %Qubit*, %Qubit** %45 - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__22) - %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %47 = bitcast i8* %46 to %Qubit** - %qb__24 = load %Qubit*, %Qubit** %47 - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__24) - %c = call %Qubit* @__quantum__rt__qubit_allocate() %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %49 = bitcast i8* %48 to %Qubit** - store %Qubit* %c, %Qubit** %49 + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %51 = bitcast i8* %50 to %Qubit** - %qb__26 = load %Qubit*, %Qubit** %51 br i1 true, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, 
%Array* %ctls, %Qubit* %qb__26) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__26) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) - %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) - %53 = bitcast i8* %52 to %Qubit** - store %Qubit* %c, %Qubit** %53 + ret void +} + +declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5 + %ctls__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %ctls, %Array* %3) call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) - %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, 
i64 0) - %55 = bitcast i8* %54 to %Qubit** - %qb__27 = load %Qubit*, %Qubit** %55 - br i1 true, label %then0__2, label %else__2 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls__1) + %7 = icmp eq i64 %6, 1 + br i1 %7, label %then0__1, label %else__1 -then0__2: ; preds = %continue__1 - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__27) - br label %continue__2 +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__1, %Qubit* %target) + br label %continue__1 -else__2: ; preds = %continue__1 - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__27) - br label %continue__2 +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__1, %Qubit* %target) + br label %continue__1 -continue__2: ; preds = %else__2, %then0__2 +continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) - %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) - %57 = bitcast i8* %56 to %Qubit** - store %Qubit* %c, %Qubit** %57 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %59 = bitcast i8* %58 to %Qubit** - %qb__28 = load %Qubit*, %Qubit** %59 - br i1 true, label %then0__3, label %else__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then0__3: ; preds = %continue__2 - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__28) - br label %continue__3 +declare %Array* 
@__quantum__rt__array_concatenate(%Array*, %Array*) -else__3: ; preds = %continue__2 - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__28) - br label %continue__3 +declare i64 @__quantum__rt__array_get_size_1d(%Array*) -continue__3: ; preds = %else__3, %then0__3 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) - %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) - %61 = bitcast i8* %60 to %Qubit** - store %Qubit* %c, %Qubit** %61 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) - %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %63 = bitcast i8* %62 to %Qubit** - %qb__29 = load %Qubit*, %Qubit** %63 - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__29) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) - %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) - %65 = bitcast i8* %64 to %Qubit** - store %Qubit* %c, %Qubit** %65 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) - %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %67 = bitcast i8* %66 to %Qubit** - %qb__30 = load %Qubit*, %Qubit** %67 - call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__30) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) - %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %68 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) - %69 = bitcast i8* %68 to %Qubit** - store %Qubit* %c, %Qubit** %69 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %71 = bitcast i8* %70 to %Qubit** - %qb__31 = load %Qubit*, %Qubit** %71 - call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__31) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) - %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) - %73 = bitcast i8* %72 to %Qubit** - store %Qubit* %c, %Qubit** %73 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) - %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %75 = bitcast i8* %74 to %Qubit** - %qb__32 = load %Qubit*, %Qubit** %75 - call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__6, %Qubit* %qb__32) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) - %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) - %77 = bitcast i8* %76 to %Qubit** - store %Qubit* %c, %Qubit** %77 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) - %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %79 = bitcast i8* %78 to %Qubit** - %qb__33 = load %Qubit*, %Qubit** %79 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__33) - call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) - %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) - %81 = bitcast i8* %80 to %Qubit** - store %Qubit* %c, %Qubit** %81 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) - %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %83 = bitcast i8* %82 to %Qubit** - %qb__35 = load %Qubit*, %Qubit** %83 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__35) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) - call void @__quantum__rt__qubit_release(%Qubit* %c) - %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %85 = bitcast i8* %84 to %Qubit** - %qb__37 = load %Qubit*, %Qubit** %85 - %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) - %87 = icmp eq i64 %86, 1 - br i1 %87, label %then0__4, label %else__4 - -then0__4: ; preds = %continue__3 - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__37) - br label %continue__4 - -else__4: ; preds = %continue__3 - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__37) - br label %continue__4 - -continue__4: ; preds = %else__4, 
%then0__4 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %89 = bitcast i8* %88 to %Qubit** - %qb__38 = load %Qubit*, %Qubit** %89 - %90 = icmp eq i64 %86, 1 - br i1 %90, label %then0__5, label %else__5 - -then0__5: ; preds = %continue__4 - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__38) - br label %continue__5 - -else__5: ; preds = %continue__4 - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__38) - br label %continue__5 - -continue__5: ; preds = %else__5, %then0__5 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %92 = bitcast i8* %91 to %Qubit** - %qb__39 = load %Qubit*, %Qubit** %92 - %93 = icmp eq i64 %86, 1 - br i1 %93, label %then0__6, label %else__6 +define void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5 + store %Qubit* 
%target, %Qubit** %6 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) + ret void +} -then0__6: ; preds = %continue__5 - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__39) - br label %continue__6 +declare %Tuple* @__quantum__rt__tuple_create(i64) -else__6: ; preds = %continue__5 - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__39) - br label %continue__6 +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) -continue__6: ; preds = %else__6, %then0__6 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %95 = bitcast i8* %94 to %Qubit** - %qb__40 = load %Qubit*, %Qubit** %95 - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__40) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %97 = bitcast i8* %96 to %Qubit** - %qb__41 = load %Qubit*, %Qubit** %97 - call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__41) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %99 = bitcast i8* %98 to %Qubit** - %qb__42 = load %Qubit*, %Qubit** %99 - call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__42) - call void 
@__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %101 = bitcast i8* %100 to %Qubit** - %qb__43 = load %Qubit*, %Qubit** %101 - call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__43) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %103 = bitcast i8* %102 to %Qubit** - %qb__44 = load %Qubit*, %Qubit** %103 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__44) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %105 = bitcast i8* %104 to %Qubit** - %qb__46 = load %Qubit*, %Qubit** %105 - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__46) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__qubit_release_array(%Array* %cc) - call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) +define void 
@Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) ret void } -declare %Qubit* @__quantum__rt__qubit_allocate() +declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) -declare %Array* @__quantum__rt__qubit_allocate_array(i64) +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} -define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { entry: - %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) - %1 = bitcast i8* %0 to %Qubit** - store %Qubit* %control, %Qubit** %1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - br i1 true, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) ret void } -declare void @__quantum__qis__inject_barrier(i64, i64) - -define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { entry: - call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) ret void } -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i64) - -declare void @__quantum__rt__qubit_release(%Qubit*) +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} -declare void @__quantum__rt__qubit_release_array(%Array*) +declare %Result* @__quantum__qis__single_qubit_measure(i64, i64, %Qubit*) -define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(i1 %compare) { +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { entry: - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qb = load %Qubit*, %Qubit** %1 - %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) - %5 = bitcast i8* %4 to %Qubit** - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %7 = 
bitcast i8* %6 to %Qubit** - %8 = load %Qubit*, %Qubit** %7 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %10 = bitcast i8* %9 to %Qubit** - %11 = load %Qubit*, %Qubit** %10 - store %Qubit* %8, %Qubit** %3 - store %Qubit* %11, %Qubit** %5 - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) - %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %13 = bitcast i8* %12 to i2* - %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %15 = bitcast i8* %14 to i2* - %16 = load i2, i2* @PauliY - %17 = load i2, i2* @PauliX - store i2 %16, i2* %13 - store i2 %17, i2* %15 call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %18 = load %Result*, %Result** @ResultOne + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = load %Result*, %Result** @ResultOne %res = alloca %Result* - store %Result* %18, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 1) + store %Result* %0, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) %haveY = alloca i1 store i1 false, i1* %haveY + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) + %2 = sub i64 %1, 1 br label %header__1 header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %31, %exiting__1 ] - %19 = icmp sle i64 %i, 1 - br i1 %19, label %body__1, label %exit__1 + %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %3 = icmp sle i64 %i, %2 + br i1 %3, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %21 = bitcast i8* %20 to i2* - %22 = load i2, i2* %21 - %23 = load i2, i2* @PauliY - %24 = icmp eq i2 %22, %23 - %25 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %26 = bitcast i8* %25 to i2* - %27 = load i2, i2* %26 - %28 = load i2, i2* @PauliI - %29 = icmp eq i2 %27, %28 - %30 = or i1 %24, %29 - br i1 %30, label %then0__1, label %continue__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %5 = bitcast i8* %4 to i2* + %6 = load i2, i2* %5 + %7 = load i2, i2* @PauliY + %8 = icmp eq i2 %6, %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %10 = bitcast i8* %9 to i2* + %11 = load i2, i2* %10 + %12 = load i2, i2* @PauliI + %13 = icmp eq i2 %11, %12 + %14 = or i1 %8, %13 + br i1 %14, label %then0__1, label %continue__1 then0__1: ; preds = %body__1 store i1 true, i1* %haveY @@ -496,386 +297,45 @@ continue__1: ; preds = %then0__1, %body__1 br label %exiting__1 exiting__1: ; preds = %continue__1 - %31 = add i64 %i, 1 + %15 = add i64 %i, 1 br label %header__1 exit__1: ; preds = %header__1 - %32 = load i1, i1* %haveY - br i1 %32, label %then0__2, label %test1__1 + %16 = load i1, i1* %haveY + br i1 %16, label %then0__2, label %test1__1 then0__2: ; preds = %exit__1 - %33 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) - store %Result* %33, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 -1) + %17 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 1) + store %Result* %17, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 -1) br label %continue__2 test1__1: ; preds = %exit__1 - br i1 false, label %then1__1, label %test2__1 + %18 = icmp sgt i64 %1, 2 + 
br i1 %18, label %then1__1, label %test2__1 then1__1: ; preds = %test1__1 - %34 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 1) - %35 = load %Result*, %Result** %res - store %Result* %34, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %35, i64 -1) + %19 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 1) + %20 = load %Result*, %Result** %res + store %Result* %19, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %20, i64 -1) br label %continue__2 test2__1: ; preds = %test1__1 - br i1 false, label %then2__1, label %test3__1 + %21 = icmp eq i64 %1, 1 + br i1 %21, label %then2__1, label %test3__1 then2__1: ; preds = %test2__1 - %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %37 = bitcast i8* %36 to i2* - %38 = load i2, i2* %37 - %39 = load i2, i2* @PauliX - %40 = icmp eq i2 %38, %39 - br i1 %40, label %then0__3, label %else__1 - -then0__3: ; preds = %then2__1 - %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %42 = bitcast i8* %41 to %Qubit** - %qb__2 = load %Qubit*, %Qubit** %42 - %43 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__2) - call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) - %44 = load %Result*, %Result** %res - store %Result* %43, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) - br label %continue__3 - -else__1: ; preds = %then2__1 - %45 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %46 = bitcast i8* %45 to %Qubit** - %qb__3 = load %Qubit*, %Qubit** %46 - %47 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__3) - call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 1) - %48 = load %Result*, %Result** %res - store %Result* %47, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %48, i64 -1) - br label %continue__3 - -continue__3: ; preds = %else__1, %then0__3 - br label %continue__2 - -test3__1: ; preds = %test2__1 - %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %50 = bitcast i8* %49 to i2* - %51 = load i2, i2* %50 - %52 = load i2, i2* @PauliX - %53 = icmp eq i2 %51, %52 - %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %55 = bitcast i8* %54 to i2* - %56 = load i2, i2* %55 - %57 = load i2, i2* @PauliX - %58 = icmp eq i2 %56, %57 - %59 = and i1 %53, %58 - br i1 %59, label %then3__1, label %test4__1 - -then3__1: ; preds = %test3__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %60 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qs12) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 1) - %61 = load %Result*, %Result** %res - store %Result* %60, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %61, i64 -1) - br label %continue__2 - -test4__1: ; preds = %test3__1 - %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %63 = bitcast i8* %62 to i2* - %64 = load i2, i2* %63 - %65 = load i2, i2* @PauliX - %66 = icmp eq i2 %64, %65 - %67 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %68 = bitcast i8* %67 to i2* - %69 = load i2, i2* %68 - %70 = load i2, i2* @PauliZ - %71 = icmp eq i2 %69, %70 - %72 = and i1 %66, %71 - br i1 %72, label %then4__1, label %test5__1 - -then4__1: ; preds = %test4__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %73 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qs12) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 1) - %74 = load %Result*, %Result** %res - store %Result* %73, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - br label %continue__2 - -test5__1: ; preds = %test4__1 - %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %76 = bitcast i8* %75 to i2* - %77 = load i2, i2* %76 - %78 = load i2, i2* @PauliZ - %79 = icmp eq i2 %77, %78 - %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %81 = bitcast i8* %80 to i2* - %82 = load i2, i2* %81 - %83 = load i2, i2* @PauliX - %84 = icmp eq i2 %82, %83 - %85 = and i1 %79, %84 - br i1 %85, label %then5__1, label %test6__1 - -then5__1: ; preds = %test5__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %86 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qs12) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 1) - %87 = load %Result*, %Result** %res - store %Result* %86, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %87, i64 -1) - br label %continue__2 - -test6__1: ; preds = %test5__1 - %88 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %89 = bitcast i8* %88 to i2* - %90 = load i2, i2* %89 - %91 = load i2, i2* @PauliZ - %92 = icmp eq i2 %90, %91 - %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %94 = bitcast i8* %93 to i2* - %95 = load i2, i2* %94 - %96 = load i2, i2* @PauliZ - %97 = icmp eq i2 %95, %96 - %98 = and i1 %92, %97 - br i1 %98, label %then6__1, label %continue__2 - -then6__1: ; preds = %test6__1 - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %99 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qs12) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 1) - %100 = load %Result*, %Result** %res - store %Result* %99, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %100, i64 -1) - br label %continue__2 - -continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 - %r12 = load %Result*, %Result** %res - call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) - br i1 %compare, label %then0__4, label %continue__4 - -then0__4: ; preds = %continue__2 - %101 = load %Result*, %Result** @ResultZero - %102 = call i1 @__quantum__rt__result_equal(%Result* %r0, %Result* %101) - br i1 %102, label %then0__5, label %continue__5 - -then0__5: ; preds = %then0__4 - %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %104 = bitcast i8* %103 to %Qubit** - %qb__4 = load %Qubit*, %Qubit** %104 - call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__4) - br label %continue__5 - 
-continue__5: ; preds = %then0__5, %then0__4 - br label %continue__4 - -continue__4: ; preds = %continue__5, %continue__2 - call void @__quantum__rt__qubit_release_array(%Array* %qs) - call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) - ret void -} - -declare %Result* @__quantum__qis__single_qubit_measure(i64, i64, %Qubit*) - -declare void @__quantum__rt__result_update_reference_count(%Result*, i64) - -declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) - -declare i1 @__quantum__rt__result_equal(%Result*, %Result*) - -define void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %control = load %Qubit*, %Qubit** %1 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %target = load %Qubit*, %Qubit** %2 - %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) - %5 = bitcast i8* %4 to %Qubit** - store %Qubit* %control, %Qubit** %5 - %ctls__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %ctls, %Array* %3) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) - %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls__1) - %7 = icmp eq i64 %6, 1 - br i1 %7, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, 
i64 1, %Array* %ctls__1, %Qubit* %target) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__1, %Qubit* %target) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %3, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) - -define void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 - %control = load %Qubit*, %Qubit** %1 - %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 - %target = load %Qubit*, %Qubit** %2 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* - %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 - store %Qubit* %control, %Qubit** %5 - store %Qubit* %target, %Qubit** %6 - call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i64 -1) - ret void -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) - 
-define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %__controlQubits__, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { -entry: - %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 -} - -define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = load %Result*, %Result** @ResultOne - %res = alloca %Result* - store %Result* %0, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) - %haveY = alloca i1 - store i1 false, i1* %haveY - %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* 
%paulis) - %2 = sub i64 %1, 1 - br label %header__1 - -header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] - %3 = icmp sle i64 %i, %2 - br i1 %3, label %body__1, label %exit__1 - -body__1: ; preds = %header__1 - %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %5 = bitcast i8* %4 to i2* - %6 = load i2, i2* %5 - %7 = load i2, i2* @PauliY - %8 = icmp eq i2 %6, %7 - %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %10 = bitcast i8* %9 to i2* - %11 = load i2, i2* %10 - %12 = load i2, i2* @PauliI - %13 = icmp eq i2 %11, %12 - %14 = or i1 %8, %13 - br i1 %14, label %then0__1, label %continue__1 - -then0__1: ; preds = %body__1 - store i1 true, i1* %haveY - br label %continue__1 - -continue__1: ; preds = %then0__1, %body__1 - br label %exiting__1 - -exiting__1: ; preds = %continue__1 - %15 = add i64 %i, 1 - br label %header__1 - -exit__1: ; preds = %header__1 - %16 = load i1, i1* %haveY - br i1 %16, label %then0__2, label %test1__1 - -then0__2: ; preds = %exit__1 - %17 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qubits) - call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 1) - store %Result* %17, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 -1) - br label %continue__2 - -test1__1: ; preds = %exit__1 - %18 = icmp sgt i64 %1, 2 - br i1 %18, label %then1__1, label %test2__1 - -then1__1: ; preds = %test1__1 - %19 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qubits) - call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 1) - %20 = load %Result*, %Result** %res - store %Result* %19, %Result** %res - call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %20, i64 -1) 
- br label %continue__2 - -test2__1: ; preds = %test1__1 - %21 = icmp eq i64 %1, 1 - br i1 %21, label %then2__1, label %test3__1 - -then2__1: ; preds = %test2__1 - %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %23 = bitcast i8* %22 to i2* - %24 = load i2, i2* %23 - %25 = load i2, i2* @PauliX - %26 = icmp eq i2 %24, %25 - br i1 %26, label %then0__3, label %else__1 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = load i2, i2* %23 + %25 = load i2, i2* @PauliX + %26 = icmp eq i2 %24, %25 + br i1 %26, label %then0__3, label %else__1 then0__3: ; preds = %then2__1 %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) @@ -1011,6 +471,8 @@ continue__2: ; preds = %then6__1, %test6__1 ret %Result* %87 } +declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) + define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { entry: call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) @@ -1091,7 +553,7 @@ entry: define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { entry: - call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb) ret void } @@ -1102,7 +564,7 @@ entry: %theta = load double, double* %1 %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 24, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } @@ -1114,7 +576,7 @@ entry: %theta = load double, double* %1 %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 %qb = load %Qubit*, %Qubit** %2 - call void 
@__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 24, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } @@ -1223,299 +685,1325 @@ entry: %1 = icmp eq i64 %0, 1 br i1 %1, label %then0__1, label %else__1 -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %__controlQubits__, %Qubit* %qb) - br label %continue__1 +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; 
preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br 
label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %__controlQubits__, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + +define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define %Tuple* @Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define %Result* @Microsoft__Quantum__Instructions__Mx__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Instructions__Mxx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* 
@Microsoft__Quantum__Instructions__Mxz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Instructions__Mz__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Instructions__Mzx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Instructions__Mzz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define void @Microsoft__Quantum__Instructions__Sx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sx__ctladj(%Array* %ctls, 
%Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 
-1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Instructions__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Testing__Tracer__Delay__body(%Callable* %op, %Qubit* %arg, %Tuple* %aux) { +entry: + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %op, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i64 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %arg, %Qubit** %2 + call void 
@__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %0, %Tuple* null) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %op, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i64 -1) + ret void +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qb__1 = load %Qubit*, %Qubit** %3 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %5 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %7 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %15 = bitcast i8* %14 
to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %15 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %17 = bitcast i8* %16 to %Qubit** + %qb__5 = load %Qubit*, %Qubit** %17 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %19 = bitcast i8* %18 to %Qubit** + %qb__6 = load %Qubit*, %Qubit** %19 + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %qb__7 = load %Qubit*, %Qubit** %21 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %23 = bitcast i8* %22 to %Qubit** + %qb__9 = load %Qubit*, %Qubit** %23 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) + call void @__quantum__qis__inject_barrier(i64 42, i64 0) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %25 = bitcast i8* %24 to %Qubit** + %qb__11 = load %Qubit*, %Qubit** %25 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %qb__13 = load %Qubit*, %Qubit** %27 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__13) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %qb__15 = load %Qubit*, %Qubit** %29 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__15) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %31 = bitcast i8* %30 to %Qubit** + %qb__17 = load %Qubit*, %Qubit** %31 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, 
%Qubit* %qb__17) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %39 = bitcast i8* %38 to %Qubit** + %qb__19 = load %Qubit*, %Qubit** %39 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__19) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %41 = bitcast i8* %40 to %Qubit** + %qb__20 = load %Qubit*, %Qubit** %41 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__20) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %43 = bitcast i8* %42 to %Qubit** + %qb__21 = load %Qubit*, %Qubit** %43 + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__21) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %45 = bitcast i8* %44 to %Qubit** + %qb__22 = load %Qubit*, %Qubit** %45 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__22) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %47 = bitcast i8* %46 to %Qubit** + %qb__24 = load %Qubit*, %Qubit** %47 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__24) + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %49 = bitcast i8* %48 to %Qubit** + store %Qubit* %c, %Qubit** %49 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %51 = bitcast i8* %50 to 
%Qubit** + %qb__26 = load %Qubit*, %Qubit** %51 + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb__26) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__26) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) + %53 = bitcast i8* %52 to %Qubit** + store %Qubit* %c, %Qubit** %53 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %55 = bitcast i8* %54 to %Qubit** + %qb__27 = load %Qubit*, %Qubit** %55 + br i1 true, label %then0__2, label %else__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__27) + br label %continue__2 + +else__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__27) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) + %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) + %57 = bitcast i8* %56 to %Qubit** + store %Qubit* %c, %Qubit** %57 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + 
%59 = bitcast i8* %58 to %Qubit** + %qb__28 = load %Qubit*, %Qubit** %59 + br i1 true, label %then0__3, label %else__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__28) + br label %continue__3 + +else__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__28) + br label %continue__3 + +continue__3: ; preds = %else__3, %then0__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) + %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) + %61 = bitcast i8* %60 to %Qubit** + store %Qubit* %c, %Qubit** %61 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %63 = bitcast i8* %62 to %Qubit** + %qb__29 = load %Qubit*, %Qubit** %63 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) + %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %c, %Qubit** %65 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %67 = bitcast i8* %66 to %Qubit** + %qb__30 = load %Qubit*, %Qubit** %67 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__30) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) + call 
void @__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) + %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) + %69 = bitcast i8* %68 to %Qubit** + store %Qubit* %c, %Qubit** %69 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %71 = bitcast i8* %70 to %Qubit** + %qb__31 = load %Qubit*, %Qubit** %71 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__31) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) + %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %c, %Qubit** %73 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %75 = bitcast i8* %74 to %Qubit** + %qb__32 = load %Qubit*, %Qubit** %75 + call void @__quantum__qis__single_qubit_op_ctl(i64 24, i64 1, %Array* %ctls__6, %Qubit* %qb__32) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) + %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) + %77 = bitcast i8* %76 to %Qubit** + store %Qubit* %c, %Qubit** %77 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %79 = bitcast i8* %78 to %Qubit** + %qb__33 = load %Qubit*, %Qubit** %79 + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__33) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) + %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) + %81 = bitcast i8* %80 to %Qubit** + store %Qubit* %c, %Qubit** %81 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %83 = bitcast i8* %82 to %Qubit** + %qb__35 = load %Qubit*, %Qubit** %83 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__35) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__qubit_release(%Qubit* %c) + %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %85 = bitcast i8* %84 to %Qubit** + %qb__37 = load %Qubit*, %Qubit** %85 + %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) + %87 = icmp eq i64 %86, 1 + br i1 %87, label %then0__4, label %else__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__37) + br label %continue__4 + +else__4: ; 
preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__37) + br label %continue__4 + +continue__4: ; preds = %else__4, %then0__4 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %89 = bitcast i8* %88 to %Qubit** + %qb__38 = load %Qubit*, %Qubit** %89 + %90 = icmp eq i64 %86, 1 + br i1 %90, label %then0__5, label %else__5 + +then0__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__38) + br label %continue__5 + +else__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__38) + br label %continue__5 + +continue__5: ; preds = %else__5, %then0__5 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %92 = bitcast i8* %91 to %Qubit** + %qb__39 = load %Qubit*, %Qubit** %92 + %93 = icmp eq i64 %86, 1 + br i1 %93, label %then0__6, label %else__6 + +then0__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__39) + br label %continue__6 + +else__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__39) + br label %continue__6 + +continue__6: ; preds = %else__6, %then0__6 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %95 = bitcast i8* %94 to %Qubit** + %qb__40 = load %Qubit*, %Qubit** %95 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, 
%Array* %cc, %Qubit* %qb__40) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %97 = bitcast i8* %96 to %Qubit** + %qb__41 = load %Qubit*, %Qubit** %97 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__41) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %99 = bitcast i8* %98 to %Qubit** + %qb__42 = load %Qubit*, %Qubit** %99 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__42) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %101 = bitcast i8* %100 to %Qubit** + %qb__43 = load %Qubit*, %Qubit** %101 + call void @__quantum__qis__single_qubit_op_ctl(i64 24, i64 1, %Array* %cc, %Qubit* %qb__43) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %103 = bitcast i8* %102 to %Qubit** + %qb__44 = load %Qubit*, %Qubit** %103 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__44) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %105 = 
bitcast i8* %104 to %Qubit** + %qb__46 = load %Qubit*, %Qubit** %105 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__46) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__qis__inject_barrier(i64, i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body() { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 6) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %3 + %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__2) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %5 + call void @__quantum__qis__single_qubit_op(i64 11, i64 
1, %Qubit* %qb__4) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %8, %Qubit* %11) + %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %13 = bitcast i8* %12 to %Qubit** + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %15 = bitcast i8* %14 to %Qubit** + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %17 = bitcast i8* %16 to %Qubit** + %18 = load %Qubit*, %Qubit** %17 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20 + store %Qubit* %18, %Qubit** %13 + store %Qubit* %21, %Qubit** %15 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %25 = bitcast i8* %24 to i2* + %26 = load i2, i2* @PauliY + %27 = load i2, i2* @PauliX + store i2 %26, i2* %23 + store i2 %27, i2* %25 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %28 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %28, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi 
i64 [ 0, %entry ], [ %41, %exiting__1 ] + %29 = icmp sle i64 %i, 1 + br i1 %29, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %31 = bitcast i8* %30 to i2* + %32 = load i2, i2* %31 + %33 = load i2, i2* @PauliY + %34 = icmp eq i2 %32, %33 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %36 = bitcast i8* %35 to i2* + %37 = load i2, i2* %36 + %38 = load i2, i2* @PauliI + %39 = icmp eq i2 %37, %38 + %40 = or i1 %34, %39 + br i1 %40, label %then0__1, label %continue__1 -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %__controlQubits__, %Qubit* %qb) +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY br label %continue__1 -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) - ret void -} +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 -define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) - ret void -} +exiting__1: ; preds = %continue__1 + %41 = add i64 %i, 1 + br label %header__1 -define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 +exit__1: ; preds = %header__1 + %42 = load i1, i1* %haveY + br i1 %42, label %then0__2, label %test1__1 -then0__1: ; preds = %entry - call void 
@__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +then0__2: ; preds = %exit__1 + %43 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) + store %Result* %43, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + br label %continue__2 -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +test1__1: ; preds = %exit__1 + br i1 false, label %then1__1, label %test2__1 -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} +then1__1: ; preds = %test1__1 + %44 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 1) + %45 = load %Result*, %Result** %res + store %Result* %44, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %45, i64 -1) + br label %continue__2 -define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 +test2__1: ; preds = %test1__1 + br i1 false, label %then2__1, label %test3__1 -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %__controlQubits__, %Qubit* %qb) - br label %continue__1 +then2__1: 
; preds = %test2__1 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %47 = bitcast i8* %46 to i2* + %48 = load i2, i2* %47 + %49 = load i2, i2* @PauliX + %50 = icmp eq i2 %48, %49 + br i1 %50, label %then0__3, label %else__1 -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %__controlQubits__, %Qubit* %qb) - br label %continue__1 +then0__3: ; preds = %then2__1 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %52 = bitcast i8* %51 to %Qubit** + %qb__6 = load %Qubit*, %Qubit** %52 + %53 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__6) + call void @__quantum__rt__result_update_reference_count(%Result* %53, i64 1) + %54 = load %Result*, %Result** %res + store %Result* %53, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %53, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %54, i64 -1) + br label %continue__3 -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} +else__1: ; preds = %then2__1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %56 = bitcast i8* %55 to %Qubit** + %qb__7 = load %Qubit*, %Qubit** %56 + %57 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__7) + call void @__quantum__rt__result_update_reference_count(%Result* %57, i64 1) + %58 = load %Result*, %Result** %res + store %Result* %57, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %57, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %58, i64 -1) + br label %continue__3 -define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { -entry: - call void 
@__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) - ret void -} +continue__3: ; preds = %else__1, %then0__3 + br label %continue__2 -define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) - ret void -} +test3__1: ; preds = %test2__1 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %60 = bitcast i8* %59 to i2* + %61 = load i2, i2* %60 + %62 = load i2, i2* @PauliX + %63 = icmp eq i2 %61, %62 + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %65 = bitcast i8* %64 to i2* + %66 = load i2, i2* %65 + %67 = load i2, i2* @PauliX + %68 = icmp eq i2 %66, %67 + %69 = and i1 %63, %68 + br i1 %69, label %then3__1, label %test4__1 -define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %70 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %70, i64 1) + %71 = load %Result*, %Result** %res + store %Result* %70, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %70, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %71, i64 -1) + br label %continue__2 -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +test4__1: ; preds = %test3__1 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %73 = bitcast i8* %72 to i2* + %74 = load 
i2, i2* %73 + %75 = load i2, i2* @PauliX + %76 = icmp eq i2 %74, %75 + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = load i2, i2* %78 + %80 = load i2, i2* @PauliZ + %81 = icmp eq i2 %79, %80 + %82 = and i1 %76, %81 + br i1 %82, label %then4__1, label %test5__1 -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +then4__1: ; preds = %test4__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %83 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %83, i64 1) + %84 = load %Result*, %Result** %res + store %Result* %83, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %83, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %84, i64 -1) + br label %continue__2 -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} +test5__1: ; preds = %test4__1 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %86 = bitcast i8* %85 to i2* + %87 = load i2, i2* %86 + %88 = load i2, i2* @PauliZ + %89 = icmp eq i2 %87, %88 + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = load i2, i2* %91 + %93 = load i2, i2* @PauliX + %94 = icmp eq i2 %92, %93 + %95 = and i1 %89, %94 + br i1 %95, label %then5__1, label %test6__1 -define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - %0 = call 
i64 @__quantum__rt__array_get_size_1d(%Array* %__controlQubits__) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 +then5__1: ; preds = %test5__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %96 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %96, i64 1) + %97 = load %Result*, %Result** %res + store %Result* %96, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %96, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %97, i64 -1) + br label %continue__2 -then0__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %__controlQubits__, %Qubit* %qb) - br label %continue__1 +test6__1: ; preds = %test5__1 + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %99 = bitcast i8* %98 to i2* + %100 = load i2, i2* %99 + %101 = load i2, i2* @PauliZ + %102 = icmp eq i2 %100, %101 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %104 = bitcast i8* %103 to i2* + %105 = load i2, i2* %104 + %106 = load i2, i2* @PauliZ + %107 = icmp eq i2 %105, %106 + %108 = and i1 %102, %107 + br i1 %108, label %then6__1, label %continue__2 -else__1: ; preds = %entry - call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %__controlQubits__, %Qubit* %qb) - br label %continue__1 +then6__1: ; preds = %test6__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %109 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %109, i64 1) + %110 = load %Result*, %Result** %res + store %Result* %109, %Result** %res + call void 
@__quantum__rt__result_update_reference_count(%Result* %109, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %110, i64 -1) + br label %continue__2 -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %r12 = load %Result*, %Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) + %111 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %112 = bitcast %Tuple* %111 to { %Callable*, %Callable*, %Qubit* }* + %113 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %112, i32 0, i32 0 + %114 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %112, i32 0, i32 1 + %115 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %112, i32 0, i32 2 + %116 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__Tracer__Delay, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %117 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 3) + %119 = bitcast i8* %118 to %Qubit** + %120 = load %Qubit*, %Qubit** %119 + store %Callable* %116, %Callable** %113 + store %Callable* %117, %Callable** %114 + store %Qubit* %120, 
%Qubit** %115 + %121 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %111) + %122 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %123 = bitcast %Tuple* %122 to { %Callable*, %Callable*, %Qubit* }* + %124 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %123, i32 0, i32 0 + %125 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %123, i32 0, i32 1 + %126 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %123, i32 0, i32 2 + %127 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__Tracer__Delay, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %128 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Y, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 3) + %130 = bitcast i8* %129 to %Qubit** + %131 = load %Qubit*, %Qubit** %130 + store %Callable* %127, %Callable** %124 + store %Callable* %128, %Callable** %125 + store %Qubit* %131, %Qubit** %126 + %132 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %122) + call void @Microsoft__Quantum__Intrinsic__ApplyIfElseIntrinsic__body(%Result* %r0, %Callable* %121, %Callable* %132) + %133 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %134 = bitcast %Tuple* %133 to { %Callable*, %Callable*, %Qubit* }* + %135 = getelementptr inbounds { %Callable*, %Callable*, 
%Qubit* }, { %Callable*, %Callable*, %Qubit* }* %134, i32 0, i32 0 + %136 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %134, i32 0, i32 1 + %137 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %134, i32 0, i32 2 + %138 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__Tracer__Delay, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %139 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Z, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %140 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 4) + %141 = bitcast i8* %140 to %Qubit** + %142 = load %Qubit*, %Qubit** %141 + store %Callable* %138, %Callable** %135 + store %Callable* %139, %Callable** %136 + store %Qubit* %142, %Qubit** %137 + %143 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %133) + %144 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %145 = bitcast %Tuple* %144 to { %Callable*, %Callable*, %Qubit* }* + %146 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %145, i32 0, i32 0 + %147 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %145, i32 0, i32 1 + %148 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %145, i32 0, i32 2 + %149 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__Tracer__Delay, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %150 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, 
%Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + %151 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 4) + %152 = bitcast i8* %151 to %Qubit** + %153 = load %Qubit*, %Qubit** %152 + store %Callable* %149, %Callable** %146 + store %Callable* %150, %Callable** %147 + store %Qubit* %153, %Qubit** %148 + %154 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %144) + call void @Microsoft__Quantum__Intrinsic__ApplyIfElseIntrinsic__body(%Result* %r12, %Callable* %143, %Callable* %154) + %155 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 5) + %156 = bitcast i8* %155 to %Qubit** + %qb__8 = load %Qubit*, %Qubit** %156 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__8) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %121, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %132, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %132, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %143, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %143, i64 -1) + call void 
@__quantum__rt__callable_memory_management(i32 0, %Callable* %154, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %154, i64 -1) ret void } -define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +define void @Microsoft__Quantum__Testing__Tracer__Delay__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - ret %Tuple* null + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Qubit*, %Tuple* }* + %1 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1 + %5 = load %Qubit*, %Qubit** %2 + %6 = load %Tuple*, %Tuple** %3 + call void @Microsoft__Quantum__Testing__Tracer__Delay__body(%Callable* %4, %Qubit* %5, %Tuple* %6) + ret void } -define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i64)*]*, %Tuple*) + +define void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - ret %Tuple* null + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void } -define %Tuple* @Microsoft__Quantum__Core__Inline__body() { +define void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - ret %Tuple* null + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + 
call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void } -define %Result* @Microsoft__Quantum__Instructions__Mx__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) - ret %Result* %0 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void } -define %Result* @Microsoft__Quantum__Instructions__Mxx__body(%Array* %qubits) { +define void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void } -define %Result* @Microsoft__Quantum__Instructions__Mxz__body(%Array* %qubits) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret 
%Result* %0 +define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %3 + %5 = bitcast %Tuple* %arg-tuple to { %Tuple* }* + %6 = getelementptr inbounds { %Tuple* }, { %Tuple* }* %5, i32 0, i32 0 + %7 = load %Tuple*, %Tuple** %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Qubit*, %Tuple* }* + %10 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10 + store %Qubit* %4, %Qubit** %11 + store %Tuple* %7, %Tuple** %12 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + ret void } -define %Result* @Microsoft__Quantum__Instructions__Mz__body(%Qubit* %qb) { +define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) { entry: - %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 + %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %4, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i64 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) + ret void } -define %Result* @Microsoft__Quantum__Instructions__Mzx__body(%Array* %qubits) { +define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %4, i64 %count-change) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %4, i64 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) + ret void } -define %Result* @Microsoft__Quantum__Instructions__Mzz__body(%Array* %qubits) { +define void @Microsoft__Quantum__Intrinsic__Y__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) - %0 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) - call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) - ret %Result* %0 + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %2) + ret void } -define void @Microsoft__Quantum__Instructions__Sx__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %2) ret void } -define void @Microsoft__Quantum__Instructions__Sx__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + 
call void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %3, %Qubit* %4) ret void } -define void @Microsoft__Quantum__Instructions__Sx__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %3, %Qubit* %4) ret void } -define void @Microsoft__Quantum__Instructions__Sx__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) +define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %3 + %5 = bitcast %Tuple* %arg-tuple to { %Tuple* }* + %6 = getelementptr inbounds { %Tuple* }, { %Tuple* }* %5, i32 0, i32 0 + %7 = load %Tuple*, %Tuple** %6 + %8 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Qubit*, %Tuple* }* + %10 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10 + store %Qubit* %4, %Qubit** %11 + store %Tuple* %7, %Tuple** %12 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) ret void } -define void @Microsoft__Quantum__Instructions__Sz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %2) ret void } -define void @Microsoft__Quantum__Instructions__Sz__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %2) ret 
void } -define void @Microsoft__Quantum__Instructions__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %3, %Qubit* %4) ret void } -define void @Microsoft__Quantum__Instructions__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %3, %Qubit* %4) ret void } -define void @Microsoft__Quantum__Instructions__Tx__body(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) +define void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %3 + %5 = bitcast %Tuple* %arg-tuple to { %Tuple* }* + %6 = getelementptr inbounds { %Tuple* }, { %Tuple* }* %5, i32 0, i32 0 + %7 = load %Tuple*, %Tuple** %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Qubit*, %Tuple* }* + %10 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10 + store %Qubit* %4, %Qubit** %11 + store %Tuple* %7, %Tuple** %12 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) ret void } -define void @Microsoft__Quantum__Instructions__Tx__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, 
%Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) ret void } -define void @Microsoft__Quantum__Instructions__Tx__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) ret void } -define void @Microsoft__Quantum__Instructions__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) ret void } -define void @Microsoft__Quantum__Instructions__Tz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = 
getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) ret void } -define void @Microsoft__Quantum__Instructions__Tz__adj(%Qubit* %qb) { -entry: - call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) +define void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %3 + %5 = bitcast %Tuple* %arg-tuple to { %Tuple* }* + %6 = getelementptr inbounds { %Tuple* }, { %Tuple* }* %5, i32 0, i32 0 + %7 = load %Tuple*, %Tuple** %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Qubit*, %Tuple* }* + %10 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Qubit*, %Tuple* }, { %Callable*, %Qubit*, %Tuple* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10 + store %Qubit* %4, %Qubit** %11 + store %Tuple* %7, %Tuple** %12 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13 + call void 
@__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) ret void } -define void @Microsoft__Quantum__Instructions__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i64 %count-change) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %4, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i64 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) ret void } -define void @Microsoft__Quantum__Instructions__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Qubit* }* + %1 = 
getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %4, i64 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i64 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) ret void } +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) + define { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { entry: %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) @@ -1527,3 +2015,5 @@ entry: } declare void @__quantum__rt__string_update_reference_count(%String*, i64) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i64) diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index f40b8f9aa4d..f07b57c4304 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -36,6 +36,13 @@ namespace Microsoft.Quantum.Instructions { body intrinsic; } + @TargetInstruction("apply_conditionally") + operation apply_conditionally( + measurementResults : Result[], resultsValues : Result[], + onEqualOp : (Unit => Unit) , onNonEqualOp : (Unit => Unit)) : Unit { + body intrinsic; + } + // Operations, used in Hadamard frame tracking @Inline() operation Tz(qb : Qubit) : Unit @@ -209,9 +216,9 @@ namespace Microsoft.Quantum.Intrinsic { operation 
Rz(theta : Double, qb : Qubit) : Unit is Adj + Ctl { body (...) { Phys.single_qubit_op(23, 1, qb); } - adjoint (...) { Phys.single_qubit_op(24, 1, qb); } - controlled (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } - controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + adjoint (...) { Phys.single_qubit_op(23, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(24, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(24, 1, ctls, qb); } } @Inline() @@ -252,6 +259,17 @@ namespace Microsoft.Quantum.Intrinsic { } } + operation ApplyConditionallyIntrinsic( + measurementResults : Result[], resultsValues : Result[], + onEqualOp : (Unit => Unit) , onNonEqualOp : (Unit => Unit)) : Unit { + body (...) { return Phys.apply_conditionally(measurementResults, resultsValues, onEqualOp, onNonEqualOp); } + } + + operation ApplyIfElseIntrinsic( + measurementResult : Result, onResultZeroOp : (Unit => Unit) , onResultOneOp : (Unit => Unit)) : Unit { + body (...) { return Phys.apply_conditionally([measurementResult], [Zero], onResultZeroOp, onResultOneOp); } + } + // operation SWAP(a : Qubit, b : Qubit) : Unit // is Adj { // body intrinsic; diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index 37f8bc57ad1..d1ff4711036 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -2,8 +2,8 @@ // Licensed under the MIT License. 
#include -#include #include +#include #include "catch.hpp" @@ -213,12 +213,190 @@ TEST_CASE("Layering measurements", "[tracer]") Qubit qs23[2] = {q2, q3}; CHECK(2 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(5, 1, 2, qs23))); CHECK(1 == tr->TraceSingleQubitOp(3, 1, q4)); +} - const vector& layers = tr->UseLayers(); - REQUIRE(layers.size() == 3); - CHECK(layers[0].operations.size() == 3); - CHECK(layers[1].operations.size() == 2); - CHECK(layers[2].operations.size() == 1); +TEST_CASE("Conditionals: noops", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 3, q1)); + CHECK(1 == tr->TraceSingleQubitOp(1, 3, q1)); + Result one = tr->UseOne(); + { + CTracer::FenceScope fs(tr.get(), 1, &one, 0, nullptr); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q2)); + } + { + CTracer::FenceScope fs(tr.get(), 0, nullptr, 1, &one); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q2)); + } + { + CTracer::FenceScope fs(tr.get(), 0, nullptr, 0, nullptr); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q2)); + } +} + +TEST_CASE("Conditionals: a new layer because of the fence", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); + Result r = tr->TraceSingleQubitMeasurement(1, 1, q1); + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(r)); + + { + CTracer::FenceScope fs(tr.get(), 1, &r, 0, nullptr); + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q2)); + } + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q3)); +} + +TEST_CASE("Conditionals: single fence", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == 
tr->TraceSingleQubitOp(1, 1, q1)); + Result r = tr->TraceSingleQubitMeasurement(1, 1, q1); + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(r)); + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q1)); + + { + CTracer::FenceScope fs(tr.get(), 1, &r, 0, nullptr); + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q2)); + } + + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q1)); + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q2)); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q3)); + CHECK(1 == tr->TraceSingleQubitOp(1, 1, q3)); + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q3)); +} + +TEST_CASE("Conditionals: fence from two result arrays", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); + Result r1 = tr->TraceSingleQubitMeasurement(1, 1, q1); + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(r1)); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q2)); + CHECK(1 == tr->TraceSingleQubitOp(1, 1, q2)); + Result r2 = tr->TraceSingleQubitMeasurement(1, 1, q2); + CHECK(2 == tr->GetLayerIdOfSourceMeasurement(r2)); + + { + CTracer::FenceScope fs(tr.get(), 1, &r1, 1, &r2); + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q3)); + } + + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q1)); +} + +TEST_CASE("Conditionals: nested fence is later than parent", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + Qubit q5 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); + Result r1 = tr->TraceSingleQubitMeasurement(1, 1, q1); + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(r1)); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q2)); + CHECK(1 == tr->TraceSingleQubitOp(1, 1, q2)); + Result r2 = tr->TraceSingleQubitMeasurement(1, 1, q2); + CHECK(2 == 
tr->GetLayerIdOfSourceMeasurement(r2)); + + { + CTracer::FenceScope fs(tr.get(), 1, &r1, 0, nullptr); + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q3)); + { + CTracer::FenceScope fs(tr.get(), 0, nullptr, 1, &r2); + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q4)); + } + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q5)); + } + + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q1)); +} + +TEST_CASE("Conditionals: nested fence is earlier than parent", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + Qubit q5 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); + Result r1 = tr->TraceSingleQubitMeasurement(1, 1, q1); + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(r1)); + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q2)); + CHECK(1 == tr->TraceSingleQubitOp(1, 1, q2)); + Result r2 = tr->TraceSingleQubitMeasurement(1, 1, q2); + CHECK(2 == tr->GetLayerIdOfSourceMeasurement(r2)); + + { + CTracer::FenceScope fs(tr.get(), 1, &r2, 0, nullptr); + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q3)); + { + CTracer::FenceScope fs(tr.get(), 0, nullptr, 1, &r1); + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q4)); + } + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q5)); + } + CHECK(2 == tr->TraceSingleQubitOp(1, 1, q1)); +} + +TEST_CASE("Conditionals: fences and barriers", "[tracer][tracer.conditionals]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + Qubit q5 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); + Result r1 = tr->TraceSingleQubitMeasurement(1, 1, q1); + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(r1)); + + CHECK(2 == tr->InjectGlobalBarrier(42, 1)); + + Result r2 = tr->TraceSingleQubitMeasurement(1, 1, q2); + CHECK(3 == 
tr->GetLayerIdOfSourceMeasurement(r2)); + + { + CTracer::FenceScope fs(tr.get(), 1, &r1, 0, nullptr); + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q3)); + } + { + CTracer::FenceScope fs(tr.get(), 0, nullptr, 1, &r2); + CHECK(4 == tr->TraceSingleQubitOp(1, 1, q4)); + } + CHECK(3 == tr->TraceSingleQubitOp(1, 1, q5)); } TEST_CASE("Output: to string", "[tracer]") From d34d2c596117b1389a9072a3abbf1329ed5f116f Mon Sep 17 00:00:00 2001 From: Angela Burton Date: Fri, 5 Mar 2021 15:36:28 -0800 Subject: [PATCH 18/30] Update NuGet.Config (#546) --- NuGet.Config | 2 ++ 1 file changed, 2 insertions(+) diff --git a/NuGet.Config b/NuGet.Config index 08dc1eee2fb..0a8cd98e261 100644 --- a/NuGet.Config +++ b/NuGet.Config @@ -15,6 +15,8 @@ --> + + From 7fae770f44781197200a77504be27f3877ce9fac Mon Sep 17 00:00:00 2001 From: Angela Burton Date: Sat, 6 Mar 2021 10:19:51 -0800 Subject: [PATCH 19/30] Revert "Update NuGet.Config (#546)" (#548) This reverts commit d34d2c596117b1389a9072a3abbf1329ed5f116f. --- NuGet.Config | 2 -- 1 file changed, 2 deletions(-) diff --git a/NuGet.Config b/NuGet.Config index 0a8cd98e261..08dc1eee2fb 100644 --- a/NuGet.Config +++ b/NuGet.Config @@ -15,8 +15,6 @@ --> - - From 419a6e6c902f558f23cf4648cbff0a1714b69b5d Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Sat, 6 Mar 2021 13:58:46 -0800 Subject: [PATCH 20/30] Update scripts to LLVM 11, add devcontainer (#545) This updates the QirRuntime folder's readme and scripts to explicitly call out using LLVM 11 on Linux. It also adds a devcontainer for easier local Linux development with prerequisites for Q# development and QIR development already installed. 
--- .devcontainer/Dockerfile | 8 ++++ .devcontainer/devcontainer.json | 5 +++ .gitattributes | 61 ++++++++++++++++++++++++++++ src/QirRuntime/README.md | 12 +++--- src/QirRuntime/build-qir-runtime.ps1 | 11 +++-- src/QirRuntime/build.py | 8 +++- src/QirRuntime/prerequisites.ps1 | 9 ++-- 7 files changed, 99 insertions(+), 15 deletions(-) create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .gitattributes diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000000..bc9cabd0ec5 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,8 @@ +FROM mcr.microsoft.com/dotnet/sdk:3.1-focal +RUN apt update \ + && apt-get install -y cmake \ + && apt-get install -y ninja-build \ + && apt-get install -y clang-11 \ + && apt-get install -y clang-tidy-11 +RUN apt-get install -y build-essential +CMD [ "pwsh" ] \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..a2ff6028b9a --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,5 @@ +{ + "build": { + "dockerfile": "Dockerfile" + } +} \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000000..f272b39d58c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,61 @@ +############################################################################### +# Set default behavior to automatically normalize line endings. 
+############################################################################### +* text=auto +*.sh text eol=lf + +############################################################################### +# diff behavior for some non-binary formats +############################################################################### +#*.cs diff=csharp + +############################################################################### +# Set the merge driver for project and solution files +# +# Merging from the command prompt will add diff markers to the files if there +# are conflicts (Merging from VS is not affected by the settings below, in VS +# the diff markers are never inserted). Diff markers may cause the following +# file extensions to fail to load in VS. An alternative would be to treat +# these files as binary and thus will always conflict and require user +# intervention with every merge. To do so, just uncomment the entries below +############################################################################### +#*.sln merge=binary +#*.csproj merge=binary +#*.vbproj merge=binary +#*.vcxproj merge=binary +#*.vcproj merge=binary +#*.dbproj merge=binary +#*.fsproj merge=binary +#*.lsproj merge=binary +#*.wixproj merge=binary +#*.modelproj merge=binary +#*.sqlproj merge=binary +#*.wwaproj merge=binary + +############################################################################### +# behavior for image files +# +# image files are treated as binary by default. +############################################################################### +#*.jpg binary +#*.png binary +#*.gif binary + +############################################################################### +# diff behavior for common document formats +# +# Convert binary document formats to text before diffing them. This feature +# is only available from the command line. Turn it on by uncommenting the +# entries below. 
+############################################################################### +*.md diff=astextplain +#*.doc diff=astextplain +#*.DOC diff=astextplain +#*.docx diff=astextplain +#*.DOCX diff=astextplain +#*.dot diff=astextplain +#*.DOT diff=astextplain +#*.pdf diff=astextplain +#*.PDF diff=astextplain +#*.rtf diff=astextplain +#*.RTF diff=astextplain \ No newline at end of file diff --git a/src/QirRuntime/README.md b/src/QirRuntime/README.md index fbdd3a525c9..fcf60b28d82 100644 --- a/src/QirRuntime/README.md +++ b/src/QirRuntime/README.md @@ -11,7 +11,7 @@ This folder contains QIR runtime project, which includes implementation of the ## Build -The QirRuntime project is using CMake (3.17) + Ninja(1.10.0) + Clang++(10.0.0). Other versions of the tools might work +The QirRuntime project is using CMake (3.17) + Ninja(1.10.0) + Clang++(11.0.0). Other versions of the tools might work but haven't been tested. Only x64 architecture is supported. You can use CMake directly. For example, to produce a release build: @@ -35,7 +35,7 @@ CI builds and tests are enabled for this project. The build has no external depe ### Windows pre-reqs -1. Install Clang, Ninja and CMake from the public distros. +1. Install Clang 11, Ninja and CMake from the public distros. 1. Add all three to your/system `%PATH%`. 1. Install VS 2019 and enable "Desktop development with C++" component (Clang uses MSVC's standard library on Windows). 1. Install clang-tidy and clang-format if your Clang/LLVM packages didn't include the tools. @@ -52,11 +52,11 @@ Running cmake from the editors will likely default to MSVC or clang-cl and fail. 1. In the Ubuntu's terminal: 1. `$ sudo apt install cmake` (`$ cmake --version` should return 3.16.3) 1. `$ sudo apt-get install ninja-build` (`$ ninja --version` should return 1.10.0) - 1. `$ sudo apt install clang` (`$ clang++ --version` should return 10.0.0) + 1. `$ sudo apt install clang-11` (`$ clang++-11 --version` should return 11.0.0) 1. 
Set Clang as the preferred C/C++ compiler: - - $ export CC=/usr/bin/clang - - $ export CXX=/usr/bin/clang++ - 1. `$ sudo apt install clang-tidy` (`$ clang-tidy --version` should return 'LLVM version 10.0.0') + - $ export CC=/usr/bin/clang-11 + - $ export CXX=/usr/bin/clang++-11 + 1. `$ sudo apt install clang-tidy-11` (`$ clang-tidy-11 --version` should return 'LLVM version 11.0.0') 1. Install the same version of dotnet as specified by qsharp-runtime [README](../../README.md) 1. <_optional_> To use build/test scripts, check that you have python3 installed (it should be by default). diff --git a/src/QirRuntime/build-qir-runtime.ps1 b/src/QirRuntime/build-qir-runtime.ps1 index 63247ebaab7..dfa07616fc1 100644 --- a/src/QirRuntime/build-qir-runtime.ps1 +++ b/src/QirRuntime/build-qir-runtime.ps1 @@ -18,6 +18,8 @@ if ($Env:ENABLE_QIRRUNTIME -eq "true") { $oldCXX = $env:CXX $oldRC = $env:RC + $clangTidy = "" + if (($IsMacOS) -or ((Test-Path Env:AGENT_OS) -and ($Env:AGENT_OS.StartsWith("Darwin")))) { Write-Host "On MacOS build QIR Runtim using the default C/C++ compiler (should be AppleClang)" @@ -25,9 +27,10 @@ if ($Env:ENABLE_QIRRUNTIME -eq "true") { elseif (($IsLinux) -or ((Test-Path Env:AGENT_OS) -and ($Env:AGENT_OS.StartsWith("Lin")))) { Write-Host "On Linux build QIR Runtime using Clang" - $env:CC = "/usr/bin/clang" - $env:CXX = "/usr/bin/clang++" - $env:RC = "/usr/bin/clang++" + $env:CC = "/usr/bin/clang-11" + $env:CXX = "/usr/bin/clang++-11" + $env:RC = "/usr/bin/clang++-11" + $clangTidy = "-DCMAKE_CXX_CLANG_TIDY=clang-tidy-11" } elseif (($IsWindows) -or ((Test-Path Env:AGENT_OS) -and ($Env:AGENT_OS.StartsWith("Win")))) { @@ -48,7 +51,7 @@ if ($Env:ENABLE_QIRRUNTIME -eq "true") { Push-Location $qirRuntimeBuildFolder - cmake -G Ninja -D CMAKE_BUILD_TYPE="$Env:BUILD_CONFIGURATION" ../.. + cmake -G Ninja $clangTidy -D CMAKE_BUILD_TYPE="$Env:BUILD_CONFIGURATION" ../.. cmake --build . 
--target install Pop-Location diff --git a/src/QirRuntime/build.py b/src/QirRuntime/build.py index ac20d6ec86c..5b6c1646a19 100644 --- a/src/QirRuntime/build.py +++ b/src/QirRuntime/build.py @@ -45,10 +45,14 @@ def do_build(root_dir, should_make, should_build, flavor): flavorWithDebInfo = flavor if flavor == "Release" : - flavorWithDebInfo = "RelWithDebInfo" + flavorWithDebInfo = "RelWithDebInfo" + + clangTidy = "clang-tidy" + if platform.system() == "Linux" : + clangTidy = "clang-tidy-11" if should_make: - cmd = "cmake -G Ninja -DCMAKE_CXX_CLANG_TIDY=clang-tidy -DCMAKE_BUILD_TYPE=" + flavorWithDebInfo + " ../../.." + cmd = "cmake -G Ninja -DCMAKE_CXX_CLANG_TIDY=" + clangTidy + " -DCMAKE_BUILD_TYPE=" + flavorWithDebInfo + " ../../.." log("running: " + cmd) result = subprocess.run(cmd, shell = True) if result.returncode != 0: diff --git a/src/QirRuntime/prerequisites.ps1 b/src/QirRuntime/prerequisites.ps1 index e10df08fa3e..66a03ff8f78 100644 --- a/src/QirRuntime/prerequisites.ps1 +++ b/src/QirRuntime/prerequisites.ps1 @@ -5,10 +5,13 @@ if ($Env:ENABLE_QIRRUNTIME -eq "true") { if (($IsWindows) -or ((Test-Path Env:AGENT_OS) -and ($Env:AGENT_OS.StartsWith("Win")))) { choco install llvm choco install ninja - } else { - #brew install llvm # this seems to mess up native simulator build, probably because of STD libs - # llvm should be already available on later Linux/Darwin systems + } elseif ($IsMacOS) { brew install ninja + } else { + sudo apt update + sudo apt-get install -y ninja-build + sudo apt-get install -y clang-11 + sudo apt-get install -y clang-tidy-11 } } From 2001b9aac5533310a39b5d68df9e02cf0e9c6f47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9sar=20Zaragoza=20Cort=C3=A9s?= Date: Tue, 9 Mar 2021 09:55:26 -0800 Subject: [PATCH 21/30] QIR Entry Point Driver Samples (#520) * Setup for QIR-based RandomBit Q# program. * Added generated QIR file. * Changed structure for standalone tests. 
* Added simulator and Q# operation output redirection through command line options. * Added standalone example for different input types. * Changed type of InputTypes Q# operation. * Update src/QirRuntime/test/Standalone/InputTypes/input-types.qs Co-authored-by: Chris Granade * Clean-up a bit and add TODOs. * Moved to a folder outside test. * Simplified make file and removed unused headers in driver. * Rename entry-point operation. * Rename sample to StandaloneInputReference. * Add CLI11 library to component manifest. * Update .clang-tidy for standalone samples. * Add readme. * Update readmes that mention CLI11. * Revert extra-line to .clang-tidy. * Move standalone includes to specific CMakeLists.txt. * Use QIR runtime API to redirect simulation output. * Renamed options. * Add option for Pauli type. * Simplify options code. * Add support for QirRange and QirString to be parsed from the command line. * Add support for Q# Result type. * Do not catch all exceptions in the main function. * Resolved bug pointed by Robin. * Create QirArray. * Reordered includes. * Remove headers not needed and add headers that are explicitly needed. * Added simple test for sample reference binary. * Fixed quotation marks in cmake file. * Extend entry-point operation to exercise a larger set of supported inputs. * Add support for arrays of different types in driver. * Added support for Pauli array. * All array types supported. * Added clarification comments. * Updated tests. * Removed auto-generated file. * Fix build break due to missing file. * Set environment for standalone sample test. * Fix ctest for standalone sample. * Addressed Robin's feedback. * Reverse file stream closing. * Remove use of Range in entry-point operation. 
Co-authored-by: Chris Granade --- src/QirRuntime/CMakeLists.txt | 2 +- src/QirRuntime/externals/CLI11/CLI11.hpp | 8258 +++++++++++++++++ src/QirRuntime/externals/cgmanifest.json | 8 + src/QirRuntime/externals/readme.md | 4 + src/QirRuntime/generateqir.py | 1 + src/QirRuntime/samples/CMakeLists.txt | 1 + .../StandaloneInputReference/.clang-tidy | 5 + .../StandaloneInputReference/CMakeLists.txt | 51 + .../StandaloneInputReference/qir-driver.cpp | 266 + .../qir-standalone-input-reference.ll | 77 + .../qir-standalone-input-reference.csproj | 9 + .../qsharp/qir-standalone-input-reference.qs | 15 + .../StandaloneInputReference/readme.md | 12 + src/QirRuntime/test.py | 19 +- 14 files changed, 8726 insertions(+), 2 deletions(-) create mode 100644 src/QirRuntime/externals/CLI11/CLI11.hpp create mode 100644 src/QirRuntime/samples/CMakeLists.txt create mode 100644 src/QirRuntime/samples/StandaloneInputReference/.clang-tidy create mode 100644 src/QirRuntime/samples/StandaloneInputReference/CMakeLists.txt create mode 100644 src/QirRuntime/samples/StandaloneInputReference/qir-driver.cpp create mode 100644 src/QirRuntime/samples/StandaloneInputReference/qir-standalone-input-reference.ll create mode 100644 src/QirRuntime/samples/StandaloneInputReference/qsharp/qir-standalone-input-reference.csproj create mode 100644 src/QirRuntime/samples/StandaloneInputReference/qsharp/qir-standalone-input-reference.qs create mode 100644 src/QirRuntime/samples/StandaloneInputReference/readme.md diff --git a/src/QirRuntime/CMakeLists.txt b/src/QirRuntime/CMakeLists.txt index 63221cd4d5b..5ff3fd585c4 100644 --- a/src/QirRuntime/CMakeLists.txt +++ b/src/QirRuntime/CMakeLists.txt @@ -122,5 +122,5 @@ else() endif() add_subdirectory(lib) +add_subdirectory(samples) add_subdirectory(test) - diff --git a/src/QirRuntime/externals/CLI11/CLI11.hpp b/src/QirRuntime/externals/CLI11/CLI11.hpp new file mode 100644 index 00000000000..68244d3864d --- /dev/null +++ b/src/QirRuntime/externals/CLI11/CLI11.hpp @@ -0,0 
+1,8258 @@ +#pragma once + +// CLI11: Version 1.9.1 +// Originally designed by Henry Schreiner +// https://github.com/CLIUtils/CLI11 +// +// This is a standalone header file generated by MakeSingleHeader.py in CLI11/scripts +// from: v1.9.1 +// +// From LICENSE: +// +// CLI11 1.8 Copyright (c) 2017-2019 University of Cincinnati, developed by Henry +// Schreiner under NSF AWARD 1414736. All rights reserved. +// +// Redistribution and use in source and binary forms of CLI11, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors +// may be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +// Standard combined includes: + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// Verbatim copy from Version.hpp: + + +#define CLI11_VERSION_MAJOR 1 +#define CLI11_VERSION_MINOR 9 +#define CLI11_VERSION_PATCH 1 +#define CLI11_VERSION "1.9.1" + + + + +// Verbatim copy from Macros.hpp: + + +// The following version macro is very similar to the one in PyBind11 +#if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER) +#if __cplusplus >= 201402L +#define CLI11_CPP14 +#if __cplusplus >= 201703L +#define CLI11_CPP17 +#if __cplusplus > 201703L +#define CLI11_CPP20 +#endif +#endif +#endif +#elif defined(_MSC_VER) && __cplusplus == 199711L +// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented) +// Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3 or newer +#if _MSVC_LANG >= 201402L +#define CLI11_CPP14 +#if _MSVC_LANG > 201402L && _MSC_VER >= 1910 +#define CLI11_CPP17 +#if __MSVC_LANG > 201703L && _MSC_VER >= 1910 +#define CLI11_CPP20 +#endif +#endif +#endif +#endif + +#if defined(CLI11_CPP14) +#define CLI11_DEPRECATED(reason) [[deprecated(reason)]] +#elif defined(_MSC_VER) +#define CLI11_DEPRECATED(reason) __declspec(deprecated(reason)) +#else +#define CLI11_DEPRECATED(reason) __attribute__((deprecated(reason))) +#endif + + + + +// Verbatim copy from Validators.hpp: + + +// C standard library +// Only needed for existence checking +#if defined CLI11_CPP17 && defined __has_include && !defined CLI11_HAS_FILESYSTEM +#if __has_include() +// Filesystem cannot be used if targeting macOS < 10.15 +#if defined __MAC_OS_X_VERSION_MIN_REQUIRED && __MAC_OS_X_VERSION_MIN_REQUIRED < 101500 +#define CLI11_HAS_FILESYSTEM 0 +#else +#include +#if defined __cpp_lib_filesystem && __cpp_lib_filesystem >= 201703 +#if 
defined _GLIBCXX_RELEASE && _GLIBCXX_RELEASE >= 9 +#define CLI11_HAS_FILESYSTEM 1 +#elif defined(__GLIBCXX__) +// if we are using gcc and Version <9 default to no filesystem +#define CLI11_HAS_FILESYSTEM 0 +#else +#define CLI11_HAS_FILESYSTEM 1 +#endif +#else +#define CLI11_HAS_FILESYSTEM 0 +#endif +#endif +#endif +#endif + +#if defined CLI11_HAS_FILESYSTEM && CLI11_HAS_FILESYSTEM > 0 +#include // NOLINT(build/include) +#else +#include +#include +#endif + + + +// From Version.hpp: + + + +// From Macros.hpp: + + + +// From StringTools.hpp: + +namespace CLI { + +/// Include the items in this namespace to get free conversion of enums to/from streams. +/// (This is available inside CLI as well, so CLI11 will use this without a using statement). +namespace enums { + +/// output streaming for enumerations +template ::value>::type> +std::ostream &operator<<(std::ostream &in, const T &item) { + // make sure this is out of the detail namespace otherwise it won't be found when needed + return in << static_cast::type>(item); +} + +} // namespace enums + +/// Export to CLI namespace +using enums::operator<<; + +namespace detail { +/// a constant defining an expected max vector size defined to be a big number that could be multiplied by 4 and not +/// produce overflow for some expected uses +constexpr int expected_max_vector_size{1 << 29}; +// Based on http://stackoverflow.com/questions/236129/split-a-string-in-c +/// Split a string by a delim +inline std::vector split(const std::string &s, char delim) { + std::vector elems; + // Check to see if empty string, give consistent result + if(s.empty()) { + elems.emplace_back(); + } else { + std::stringstream ss; + ss.str(s); + std::string item; + while(std::getline(ss, item, delim)) { + elems.push_back(item); + } + } + return elems; +} + +/// Simple function to join a string +template std::string join(const T &v, std::string delim = ",") { + std::ostringstream s; + auto beg = std::begin(v); + auto end = std::end(v); + if(beg != end) 
+ s << *beg++; + while(beg != end) { + s << delim << *beg++; + } + return s.str(); +} + +/// Simple function to join a string from processed elements +template ::value>::type> +std::string join(const T &v, Callable func, std::string delim = ",") { + std::ostringstream s; + auto beg = std::begin(v); + auto end = std::end(v); + if(beg != end) + s << func(*beg++); + while(beg != end) { + s << delim << func(*beg++); + } + return s.str(); +} + +/// Join a string in reverse order +template std::string rjoin(const T &v, std::string delim = ",") { + std::ostringstream s; + for(std::size_t start = 0; start < v.size(); start++) { + if(start > 0) + s << delim; + s << v[v.size() - start - 1]; + } + return s.str(); +} + +// Based roughly on http://stackoverflow.com/questions/25829143/c-trim-whitespace-from-a-string + +/// Trim whitespace from left of string +inline std::string <rim(std::string &str) { + auto it = std::find_if(str.begin(), str.end(), [](char ch) { return !std::isspace(ch, std::locale()); }); + str.erase(str.begin(), it); + return str; +} + +/// Trim anything from left of string +inline std::string <rim(std::string &str, const std::string &filter) { + auto it = std::find_if(str.begin(), str.end(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); + str.erase(str.begin(), it); + return str; +} + +/// Trim whitespace from right of string +inline std::string &rtrim(std::string &str) { + auto it = std::find_if(str.rbegin(), str.rend(), [](char ch) { return !std::isspace(ch, std::locale()); }); + str.erase(it.base(), str.end()); + return str; +} + +/// Trim anything from right of string +inline std::string &rtrim(std::string &str, const std::string &filter) { + auto it = + std::find_if(str.rbegin(), str.rend(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); + str.erase(it.base(), str.end()); + return str; +} + +/// Trim whitespace from string +inline std::string &trim(std::string &str) { return ltrim(rtrim(str)); } + +/// 
Trim anything from string +inline std::string &trim(std::string &str, const std::string filter) { return ltrim(rtrim(str, filter), filter); } + +/// Make a copy of the string and then trim it +inline std::string trim_copy(const std::string &str) { + std::string s = str; + return trim(s); +} + +/// remove quotes at the front and back of a string either '"' or '\'' +inline std::string &remove_quotes(std::string &str) { + if(str.length() > 1 && (str.front() == '"' || str.front() == '\'')) { + if(str.front() == str.back()) { + str.pop_back(); + str.erase(str.begin(), str.begin() + 1); + } + } + return str; +} + +/// Make a copy of the string and then trim it, any filter string can be used (any char in string is filtered) +inline std::string trim_copy(const std::string &str, const std::string &filter) { + std::string s = str; + return trim(s, filter); +} +/// Print a two part "help" string +inline std::ostream &format_help(std::ostream &out, std::string name, std::string description, std::size_t wid) { + name = " " + name; + out << std::setw(static_cast(wid)) << std::left << name; + if(!description.empty()) { + if(name.length() >= wid) + out << "\n" << std::setw(static_cast(wid)) << ""; + for(const char c : description) { + out.put(c); + if(c == '\n') { + out << std::setw(static_cast(wid)) << ""; + } + } + } + out << "\n"; + return out; +} + +/// Verify the first character of an option +template bool valid_first_char(T c) { + return std::isalnum(c, std::locale()) || c == '_' || c == '?' || c == '@'; +} + +/// Verify following characters of an option +template bool valid_later_char(T c) { return valid_first_char(c) || c == '.' 
|| c == '-'; } + +/// Verify an option name +inline bool valid_name_string(const std::string &str) { + if(str.empty() || !valid_first_char(str[0])) + return false; + for(auto c : str.substr(1)) + if(!valid_later_char(c)) + return false; + return true; +} + +/// Verify that str consists of letters only +inline bool isalpha(const std::string &str) { + return std::all_of(str.begin(), str.end(), [](char c) { return std::isalpha(c, std::locale()); }); +} + +/// Return a lower case version of a string +inline std::string to_lower(std::string str) { + std::transform(std::begin(str), std::end(str), std::begin(str), [](const std::string::value_type &x) { + return std::tolower(x, std::locale()); + }); + return str; +} + +/// remove underscores from a string +inline std::string remove_underscore(std::string str) { + str.erase(std::remove(std::begin(str), std::end(str), '_'), std::end(str)); + return str; +} + +/// Find and replace a substring with another substring +inline std::string find_and_replace(std::string str, std::string from, std::string to) { + + std::size_t start_pos = 0; + + while((start_pos = str.find(from, start_pos)) != std::string::npos) { + str.replace(start_pos, from.length(), to); + start_pos += to.length(); + } + + return str; +} + +/// check if the flag definitions has possible false flags +inline bool has_default_flag_values(const std::string &flags) { + return (flags.find_first_of("{!") != std::string::npos); +} + +inline void remove_default_flag_values(std::string &flags) { + auto loc = flags.find_first_of('{'); + while(loc != std::string::npos) { + auto finish = flags.find_first_of("},", loc + 1); + if((finish != std::string::npos) && (flags[finish] == '}')) { + flags.erase(flags.begin() + static_cast(loc), + flags.begin() + static_cast(finish) + 1); + } + loc = flags.find_first_of('{', loc + 1); + } + flags.erase(std::remove(flags.begin(), flags.end(), '!'), flags.end()); +} + +/// Check if a string is a member of a list of strings and optionally 
ignore case or ignore underscores +inline std::ptrdiff_t find_member(std::string name, + const std::vector names, + bool ignore_case = false, + bool ignore_underscore = false) { + auto it = std::end(names); + if(ignore_case) { + if(ignore_underscore) { + name = detail::to_lower(detail::remove_underscore(name)); + it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { + return detail::to_lower(detail::remove_underscore(local_name)) == name; + }); + } else { + name = detail::to_lower(name); + it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { + return detail::to_lower(local_name) == name; + }); + } + + } else if(ignore_underscore) { + name = detail::remove_underscore(name); + it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { + return detail::remove_underscore(local_name) == name; + }); + } else { + it = std::find(std::begin(names), std::end(names), name); + } + + return (it != std::end(names)) ? (it - std::begin(names)) : (-1); +} + +/// Find a trigger string and call a modify callable function that takes the current string and starting position of the +/// trigger and returns the position in the string to search for the next trigger string +template inline std::string find_and_modify(std::string str, std::string trigger, Callable modify) { + std::size_t start_pos = 0; + while((start_pos = str.find(trigger, start_pos)) != std::string::npos) { + start_pos = modify(str, start_pos); + } + return str; +} + +/// Split a string '"one two" "three"' into 'one two', 'three' +/// Quote characters can be ` ' or " +inline std::vector split_up(std::string str, char delimiter = '\0') { + + const std::string delims("\'\"`"); + auto find_ws = [delimiter](char ch) { + return (delimiter == '\0') ? 
(std::isspace(ch, std::locale()) != 0) : (ch == delimiter); + }; + trim(str); + + std::vector output; + bool embeddedQuote = false; + char keyChar = ' '; + while(!str.empty()) { + if(delims.find_first_of(str[0]) != std::string::npos) { + keyChar = str[0]; + auto end = str.find_first_of(keyChar, 1); + while((end != std::string::npos) && (str[end - 1] == '\\')) { // deal with escaped quotes + end = str.find_first_of(keyChar, end + 1); + embeddedQuote = true; + } + if(end != std::string::npos) { + output.push_back(str.substr(1, end - 1)); + str = str.substr(end + 1); + } else { + output.push_back(str.substr(1)); + str = ""; + } + } else { + auto it = std::find_if(std::begin(str), std::end(str), find_ws); + if(it != std::end(str)) { + std::string value = std::string(str.begin(), it); + output.push_back(value); + str = std::string(it + 1, str.end()); + } else { + output.push_back(str); + str = ""; + } + } + // transform any embedded quotes into the regular character + if(embeddedQuote) { + output.back() = find_and_replace(output.back(), std::string("\\") + keyChar, std::string(1, keyChar)); + embeddedQuote = false; + } + trim(str); + } + return output; +} + +/// Add a leader to the beginning of all new lines (nothing is added +/// at the start of the first line). `"; "` would be for ini files +/// +/// Can't use Regex, or this would be a subs. +inline std::string fix_newlines(const std::string &leader, std::string input) { + std::string::size_type n = 0; + while(n != std::string::npos && n < input.size()) { + n = input.find('\n', n); + if(n != std::string::npos) { + input = input.substr(0, n + 1) + leader + input.substr(n + 1); + n += leader.size(); + } + } + return input; +} + +/// This function detects an equal or colon followed by an escaped quote after an argument +/// then modifies the string to replace the equality with a space. 
This is needed +/// to allow the split up function to work properly and is intended to be used with the find_and_modify function +/// the return value is the offset+1 which is required by the find_and_modify function. +inline std::size_t escape_detect(std::string &str, std::size_t offset) { + auto next = str[offset + 1]; + if((next == '\"') || (next == '\'') || (next == '`')) { + auto astart = str.find_last_of("-/ \"\'`", offset - 1); + if(astart != std::string::npos) { + if(str[astart] == ((str[offset] == '=') ? '-' : '/')) + str[offset] = ' '; // interpret this as a space so the split_up works properly + } + } + return offset + 1; +} + +/// Add quotes if the string contains spaces +inline std::string &add_quotes_if_needed(std::string &str) { + if((str.front() != '"' && str.front() != '\'') || str.front() != str.back()) { + char quote = str.find('"') < str.find('\'') ? '\'' : '"'; + if(str.find(' ') != std::string::npos) { + str.insert(0, 1, quote); + str.append(1, quote); + } + } + return str; +} + +} // namespace detail + +} // namespace CLI + +// From Error.hpp: + +namespace CLI { + +// Use one of these on all error classes. +// These are temporary and are undef'd at the end of this file. +#define CLI11_ERROR_DEF(parent, name) \ + protected: \ + name(std::string ename, std::string msg, int exit_code) : parent(std::move(ename), std::move(msg), exit_code) {} \ + name(std::string ename, std::string msg, ExitCodes exit_code) \ + : parent(std::move(ename), std::move(msg), exit_code) {} \ + \ + public: \ + name(std::string msg, ExitCodes exit_code) : parent(#name, std::move(msg), exit_code) {} \ + name(std::string msg, int exit_code) : parent(#name, std::move(msg), exit_code) {} + +// This is added after the one above if a class is used directly and builds its own message +#define CLI11_ERROR_SIMPLE(name) \ + explicit name(std::string msg) : name(#name, msg, ExitCodes::name) {} + +/// These codes are part of every error in CLI. 
They can be obtained from e using e.exit_code or as a quick shortcut, +/// int values from e.get_error_code(). +enum class ExitCodes { + Success = 0, + IncorrectConstruction = 100, + BadNameString, + OptionAlreadyAdded, + FileError, + ConversionError, + ValidationError, + RequiredError, + RequiresError, + ExcludesError, + ExtrasError, + ConfigError, + InvalidError, + HorribleError, + OptionNotFound, + ArgumentMismatch, + BaseClass = 127 +}; + +// Error definitions + +/// @defgroup error_group Errors +/// @brief Errors thrown by CLI11 +/// +/// These are the errors that can be thrown. Some of them, like CLI::Success, are not really errors. +/// @{ + +/// All errors derive from this one +class Error : public std::runtime_error { + int actual_exit_code; + std::string error_name{"Error"}; + + public: + int get_exit_code() const { return actual_exit_code; } + + std::string get_name() const { return error_name; } + + Error(std::string name, std::string msg, int exit_code = static_cast(ExitCodes::BaseClass)) + : runtime_error(msg), actual_exit_code(exit_code), error_name(std::move(name)) {} + + Error(std::string name, std::string msg, ExitCodes exit_code) : Error(name, msg, static_cast(exit_code)) {} +}; + +// Note: Using Error::Error constructors does not work on GCC 4.7 + +/// Construction errors (not in parsing) +class ConstructionError : public Error { + CLI11_ERROR_DEF(Error, ConstructionError) +}; + +/// Thrown when an option is set to conflicting values (non-vector and multi args, for example) +class IncorrectConstruction : public ConstructionError { + CLI11_ERROR_DEF(ConstructionError, IncorrectConstruction) + CLI11_ERROR_SIMPLE(IncorrectConstruction) + static IncorrectConstruction PositionalFlag(std::string name) { + return IncorrectConstruction(name + ": Flags cannot be positional"); + } + static IncorrectConstruction Set0Opt(std::string name) { + return IncorrectConstruction(name + ": Cannot set 0 expected, use a flag instead"); + } + static 
IncorrectConstruction SetFlag(std::string name) { + return IncorrectConstruction(name + ": Cannot set an expected number for flags"); + } + static IncorrectConstruction ChangeNotVector(std::string name) { + return IncorrectConstruction(name + ": You can only change the expected arguments for vectors"); + } + static IncorrectConstruction AfterMultiOpt(std::string name) { + return IncorrectConstruction( + name + ": You can't change expected arguments after you've changed the multi option policy!"); + } + static IncorrectConstruction MissingOption(std::string name) { + return IncorrectConstruction("Option " + name + " is not defined"); + } + static IncorrectConstruction MultiOptionPolicy(std::string name) { + return IncorrectConstruction(name + ": multi_option_policy only works for flags and exact value options"); + } +}; + +/// Thrown on construction of a bad name +class BadNameString : public ConstructionError { + CLI11_ERROR_DEF(ConstructionError, BadNameString) + CLI11_ERROR_SIMPLE(BadNameString) + static BadNameString OneCharName(std::string name) { return BadNameString("Invalid one char name: " + name); } + static BadNameString BadLongName(std::string name) { return BadNameString("Bad long name: " + name); } + static BadNameString DashesOnly(std::string name) { + return BadNameString("Must have a name, not just dashes: " + name); + } + static BadNameString MultiPositionalNames(std::string name) { + return BadNameString("Only one positional name allowed, remove: " + name); + } +}; + +/// Thrown when an option already exists +class OptionAlreadyAdded : public ConstructionError { + CLI11_ERROR_DEF(ConstructionError, OptionAlreadyAdded) + explicit OptionAlreadyAdded(std::string name) + : OptionAlreadyAdded(name + " is already added", ExitCodes::OptionAlreadyAdded) {} + static OptionAlreadyAdded Requires(std::string name, std::string other) { + return OptionAlreadyAdded(name + " requires " + other, ExitCodes::OptionAlreadyAdded); + } + static OptionAlreadyAdded 
Excludes(std::string name, std::string other) { + return OptionAlreadyAdded(name + " excludes " + other, ExitCodes::OptionAlreadyAdded); + } +}; + +// Parsing errors + +/// Anything that can error in Parse +class ParseError : public Error { + CLI11_ERROR_DEF(Error, ParseError) +}; + +// Not really "errors" + +/// This is a successful completion on parsing, supposed to exit +class Success : public ParseError { + CLI11_ERROR_DEF(ParseError, Success) + Success() : Success("Successfully completed, should be caught and quit", ExitCodes::Success) {} +}; + +/// -h or --help on command line +class CallForHelp : public ParseError { + CLI11_ERROR_DEF(ParseError, CallForHelp) + CallForHelp() : CallForHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} +}; + +/// Usually something like --help-all on command line +class CallForAllHelp : public ParseError { + CLI11_ERROR_DEF(ParseError, CallForAllHelp) + CallForAllHelp() + : CallForAllHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} +}; + +/// Does not output a diagnostic in CLI11_PARSE, but allows to return from main() with a specific error code. 
+class RuntimeError : public ParseError { + CLI11_ERROR_DEF(ParseError, RuntimeError) + explicit RuntimeError(int exit_code = 1) : RuntimeError("Runtime error", exit_code) {} +}; + +/// Thrown when parsing an INI file and it is missing +class FileError : public ParseError { + CLI11_ERROR_DEF(ParseError, FileError) + CLI11_ERROR_SIMPLE(FileError) + static FileError Missing(std::string name) { return FileError(name + " was not readable (missing?)"); } +}; + +/// Thrown when conversion call back fails, such as when an int fails to coerce to a string +class ConversionError : public ParseError { + CLI11_ERROR_DEF(ParseError, ConversionError) + CLI11_ERROR_SIMPLE(ConversionError) + ConversionError(std::string member, std::string name) + : ConversionError("The value " + member + " is not an allowed value for " + name) {} + ConversionError(std::string name, std::vector results) + : ConversionError("Could not convert: " + name + " = " + detail::join(results)) {} + static ConversionError TooManyInputsFlag(std::string name) { + return ConversionError(name + ": too many inputs for a flag"); + } + static ConversionError TrueFalse(std::string name) { + return ConversionError(name + ": Should be true/false or a number"); + } +}; + +/// Thrown when validation of results fails +class ValidationError : public ParseError { + CLI11_ERROR_DEF(ParseError, ValidationError) + CLI11_ERROR_SIMPLE(ValidationError) + explicit ValidationError(std::string name, std::string msg) : ValidationError(name + ": " + msg) {} +}; + +/// Thrown when a required option is missing +class RequiredError : public ParseError { + CLI11_ERROR_DEF(ParseError, RequiredError) + explicit RequiredError(std::string name) : RequiredError(name + " is required", ExitCodes::RequiredError) {} + static RequiredError Subcommand(std::size_t min_subcom) { + if(min_subcom == 1) { + return RequiredError("A subcommand"); + } + return RequiredError("Requires at least " + std::to_string(min_subcom) + " subcommands", + 
ExitCodes::RequiredError); + } + static RequiredError + Option(std::size_t min_option, std::size_t max_option, std::size_t used, const std::string &option_list) { + if((min_option == 1) && (max_option == 1) && (used == 0)) + return RequiredError("Exactly 1 option from [" + option_list + "]"); + if((min_option == 1) && (max_option == 1) && (used > 1)) { + return RequiredError("Exactly 1 option from [" + option_list + "] is required and " + std::to_string(used) + + " were given", + ExitCodes::RequiredError); + } + if((min_option == 1) && (used == 0)) + return RequiredError("At least 1 option from [" + option_list + "]"); + if(used < min_option) { + return RequiredError("Requires at least " + std::to_string(min_option) + " options used and only " + + std::to_string(used) + "were given from [" + option_list + "]", + ExitCodes::RequiredError); + } + if(max_option == 1) + return RequiredError("Requires at most 1 options be given from [" + option_list + "]", + ExitCodes::RequiredError); + + return RequiredError("Requires at most " + std::to_string(max_option) + " options be used and " + + std::to_string(used) + "were given from [" + option_list + "]", + ExitCodes::RequiredError); + } +}; + +/// Thrown when the wrong number of arguments has been received +class ArgumentMismatch : public ParseError { + CLI11_ERROR_DEF(ParseError, ArgumentMismatch) + CLI11_ERROR_SIMPLE(ArgumentMismatch) + ArgumentMismatch(std::string name, int expected, std::size_t received) + : ArgumentMismatch(expected > 0 ? 
("Expected exactly " + std::to_string(expected) + " arguments to " + name + + ", got " + std::to_string(received)) + : ("Expected at least " + std::to_string(-expected) + " arguments to " + name + + ", got " + std::to_string(received)), + ExitCodes::ArgumentMismatch) {} + + static ArgumentMismatch AtLeast(std::string name, int num, std::size_t received) { + return ArgumentMismatch(name + ": At least " + std::to_string(num) + " required but received " + + std::to_string(received)); + } + static ArgumentMismatch AtMost(std::string name, int num, std::size_t received) { + return ArgumentMismatch(name + ": At Most " + std::to_string(num) + " required but received " + + std::to_string(received)); + } + static ArgumentMismatch TypedAtLeast(std::string name, int num, std::string type) { + return ArgumentMismatch(name + ": " + std::to_string(num) + " required " + type + " missing"); + } + static ArgumentMismatch FlagOverride(std::string name) { + return ArgumentMismatch(name + " was given a disallowed flag override"); + } +}; + +/// Thrown when a requires option is missing +class RequiresError : public ParseError { + CLI11_ERROR_DEF(ParseError, RequiresError) + RequiresError(std::string curname, std::string subname) + : RequiresError(curname + " requires " + subname, ExitCodes::RequiresError) {} +}; + +/// Thrown when an excludes option is present +class ExcludesError : public ParseError { + CLI11_ERROR_DEF(ParseError, ExcludesError) + ExcludesError(std::string curname, std::string subname) + : ExcludesError(curname + " excludes " + subname, ExitCodes::ExcludesError) {} +}; + +/// Thrown when too many positionals or options are found +class ExtrasError : public ParseError { + CLI11_ERROR_DEF(ParseError, ExtrasError) + explicit ExtrasError(std::vector args) + : ExtrasError((args.size() > 1 ? 
"The following arguments were not expected: " + : "The following argument was not expected: ") + + detail::rjoin(args, " "), + ExitCodes::ExtrasError) {} + ExtrasError(const std::string &name, std::vector args) + : ExtrasError(name, + (args.size() > 1 ? "The following arguments were not expected: " + : "The following argument was not expected: ") + + detail::rjoin(args, " "), + ExitCodes::ExtrasError) {} +}; + +/// Thrown when extra values are found in an INI file +class ConfigError : public ParseError { + CLI11_ERROR_DEF(ParseError, ConfigError) + CLI11_ERROR_SIMPLE(ConfigError) + static ConfigError Extras(std::string item) { return ConfigError("INI was not able to parse " + item); } + static ConfigError NotConfigurable(std::string item) { + return ConfigError(item + ": This option is not allowed in a configuration file"); + } +}; + +/// Thrown when validation fails before parsing +class InvalidError : public ParseError { + CLI11_ERROR_DEF(ParseError, InvalidError) + explicit InvalidError(std::string name) + : InvalidError(name + ": Too many positional arguments with unlimited expected args", ExitCodes::InvalidError) { + } +}; + +/// This is just a safety check to verify selection and parsing match - you should not ever see it +/// Strings are directly added to this error, but again, it should never be seen. 
+class HorribleError : public ParseError { + CLI11_ERROR_DEF(ParseError, HorribleError) + CLI11_ERROR_SIMPLE(HorribleError) +}; + +// After parsing + +/// Thrown when counting a non-existent option +class OptionNotFound : public Error { + CLI11_ERROR_DEF(Error, OptionNotFound) + explicit OptionNotFound(std::string name) : OptionNotFound(name + " not found", ExitCodes::OptionNotFound) {} +}; + +#undef CLI11_ERROR_DEF +#undef CLI11_ERROR_SIMPLE + +/// @} + +} // namespace CLI + +// From TypeTools.hpp: + +namespace CLI { + +// Type tools + +// Utilities for type enabling +namespace detail { +// Based generally on https://rmf.io/cxx11/almost-static-if +/// Simple empty scoped class +enum class enabler {}; + +/// An instance to use in EnableIf +constexpr enabler dummy = {}; +} // namespace detail + +/// A copy of enable_if_t from C++14, compatible with C++11. +/// +/// We could check to see if C++14 is being used, but it does not hurt to redefine this +/// (even Google does this: https://github.com/google/skia/blob/master/include/private/SkTLogic.h) +/// It is not in the std namespace anyway, so no harm done. 
+template using enable_if_t = typename std::enable_if::type; + +/// A copy of std::void_t from C++17 (helper for C++11 and C++14) +template struct make_void { using type = void; }; + +/// A copy of std::void_t from C++17 - same reasoning as enable_if_t, it does not hurt to redefine +template using void_t = typename make_void::type; + +/// A copy of std::conditional_t from C++14 - same reasoning as enable_if_t, it does not hurt to redefine +template using conditional_t = typename std::conditional::type; + +/// Check to see if something is a vector (fail check by default) +template struct is_vector : std::false_type {}; + +/// Check to see if something is a vector (true if actually a vector) +template struct is_vector> : std::true_type {}; + +/// Check to see if something is a vector (true if actually a const vector) +template struct is_vector> : std::true_type {}; + +/// Check to see if something is bool (fail check by default) +template struct is_bool : std::false_type {}; + +/// Check to see if something is bool (true if actually a bool) +template <> struct is_bool : std::true_type {}; + +/// Check to see if something is a shared pointer +template struct is_shared_ptr : std::false_type {}; + +/// Check to see if something is a shared pointer (True if really a shared pointer) +template struct is_shared_ptr> : std::true_type {}; + +/// Check to see if something is a shared pointer (True if really a shared pointer) +template struct is_shared_ptr> : std::true_type {}; + +/// Check to see if something is copyable pointer +template struct is_copyable_ptr { + static bool const value = is_shared_ptr::value || std::is_pointer::value; +}; + +/// This can be specialized to override the type deduction for IsMember. +template struct IsMemberType { using type = T; }; + +/// The main custom type needed here is const char * should be a string. 
+template <> struct IsMemberType { using type = std::string; }; + +namespace detail { + +// These are utilities for IsMember and other transforming objects + +/// Handy helper to access the element_type generically. This is not part of is_copyable_ptr because it requires that +/// pointer_traits be valid. + +/// not a pointer +template struct element_type { using type = T; }; + +template struct element_type::value>::type> { + using type = typename std::pointer_traits::element_type; +}; + +/// Combination of the element type and value type - remove pointer (including smart pointers) and get the value_type of +/// the container +template struct element_value_type { using type = typename element_type::type::value_type; }; + +/// Adaptor for set-like structure: This just wraps a normal container in a few utilities that do almost nothing. +template struct pair_adaptor : std::false_type { + using value_type = typename T::value_type; + using first_type = typename std::remove_const::type; + using second_type = typename std::remove_const::type; + + /// Get the first value (really just the underlying value) + template static auto first(Q &&pair_value) -> decltype(std::forward(pair_value)) { + return std::forward(pair_value); + } + /// Get the second value (really just the underlying value) + template static auto second(Q &&pair_value) -> decltype(std::forward(pair_value)) { + return std::forward(pair_value); + } +}; + +/// Adaptor for map-like structure (true version, must have key_type and mapped_type). +/// This wraps a mapped container in a few utilities access it in a general way. 
+template +struct pair_adaptor< + T, + conditional_t, void>> + : std::true_type { + using value_type = typename T::value_type; + using first_type = typename std::remove_const::type; + using second_type = typename std::remove_const::type; + + /// Get the first value (really just the underlying value) + template static auto first(Q &&pair_value) -> decltype(std::get<0>(std::forward(pair_value))) { + return std::get<0>(std::forward(pair_value)); + } + /// Get the second value (really just the underlying value) + template static auto second(Q &&pair_value) -> decltype(std::get<1>(std::forward(pair_value))) { + return std::get<1>(std::forward(pair_value)); + } +}; + +// Warning is suppressed due to "bug" in gcc<5.0 and gcc 7.0 with c++17 enabled that generates a Wnarrowing warning +// in the unevaluated context even if the function that was using this wasn't used. The standard says narrowing in +// brace initialization shouldn't be allowed but for backwards compatibility gcc allows it in some contexts. It is a +// little fuzzy what happens in template constructs and I think that was something GCC took a little while to work out. +// But regardless some versions of gcc generate a warning when they shouldn't from the following code so that should be +// suppressed +#ifdef __GNUC__ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wnarrowing" +#endif +// check for constructibility from a specific type and copy assignable used in the parse detection +template class is_direct_constructible { + template + static auto test(int, std::true_type) -> decltype( +// NVCC warns about narrowing conversions here +#ifdef __CUDACC__ +#pragma diag_suppress 2361 +#endif + TT { std::declval() } +#ifdef __CUDACC__ +#pragma diag_default 2361 +#endif + , + std::is_move_assignable()); + + template static auto test(int, std::false_type) -> std::false_type; + + template static auto test(...) 
-> std::false_type; + + public: + static constexpr bool value = decltype(test(0, typename std::is_constructible::type()))::value; +}; +#ifdef __GNUC__ +#pragma GCC diagnostic pop +#endif + +// Check for output streamability +// Based on https://stackoverflow.com/questions/22758291/how-can-i-detect-if-a-type-can-be-streamed-to-an-stdostream + +template class is_ostreamable { + template + static auto test(int) -> decltype(std::declval() << std::declval(), std::true_type()); + + template static auto test(...) -> std::false_type; + + public: + static constexpr bool value = decltype(test(0))::value; +}; + +/// Check for input streamability +template class is_istreamable { + template + static auto test(int) -> decltype(std::declval() >> std::declval(), std::true_type()); + + template static auto test(...) -> std::false_type; + + public: + static constexpr bool value = decltype(test(0))::value; +}; + +/// Templated operation to get a value from a stream +template ::value, detail::enabler> = detail::dummy> +bool from_stream(const std::string &istring, T &obj) { + std::istringstream is; + is.str(istring); + is >> obj; + return !is.fail() && !is.rdbuf()->in_avail(); +} + +template ::value, detail::enabler> = detail::dummy> +bool from_stream(const std::string & /*istring*/, T & /*obj*/) { + return false; +} + +// Check for tuple like types, as in classes with a tuple_size type trait +template class is_tuple_like { + template + // static auto test(int) + // -> decltype(std::conditional<(std::tuple_size::value > 0), std::true_type, std::false_type>::type()); + static auto test(int) -> decltype(std::tuple_size::value, std::true_type{}); + template static auto test(...) 
-> std::false_type; + + public: + static constexpr bool value = decltype(test(0))::value; +}; + +/// Convert an object to a string (directly forward if this can become a string) +template ::value, detail::enabler> = detail::dummy> +auto to_string(T &&value) -> decltype(std::forward(value)) { + return std::forward(value); +} + +/// Construct a string from the object +template ::value && !std::is_convertible::value, + detail::enabler> = detail::dummy> +std::string to_string(const T &value) { + return std::string(value); +} + +/// Convert an object to a string (streaming must be supported for that type) +template ::value && !std::is_constructible::value && + is_ostreamable::value, + detail::enabler> = detail::dummy> +std::string to_string(T &&value) { + std::stringstream stream; + stream << value; + return stream.str(); +} + +/// If conversion is not supported, return an empty string (streaming is not supported for that type) +template ::value && !is_ostreamable::value && + !is_vector::type>::type>::value, + detail::enabler> = detail::dummy> +std::string to_string(T &&) { + return std::string{}; +} + +/// convert a vector to a string +template ::value && !is_ostreamable::value && + is_vector::type>::type>::value, + detail::enabler> = detail::dummy> +std::string to_string(T &&variable) { + std::vector defaults; + defaults.reserve(variable.size()); + auto cval = variable.begin(); + auto end = variable.end(); + while(cval != end) { + defaults.emplace_back(CLI::detail::to_string(*cval)); + ++cval; + } + return std::string("[" + detail::join(defaults) + "]"); +} + +/// special template overload +template ::value, detail::enabler> = detail::dummy> +auto checked_to_string(T &&value) -> decltype(to_string(std::forward(value))) { + return to_string(std::forward(value)); +} + +/// special template overload +template ::value, detail::enabler> = detail::dummy> +std::string checked_to_string(T &&) { + return std::string{}; +} +/// get a string as a convertible value for arithmetic 
types +template ::value, detail::enabler> = detail::dummy> +std::string value_string(const T &value) { + return std::to_string(value); +} +/// get a string as a convertible value for enumerations +template ::value, detail::enabler> = detail::dummy> +std::string value_string(const T &value) { + return std::to_string(static_cast::type>(value)); +} +/// for other types just use the regular to_string function +template ::value && !std::is_arithmetic::value, detail::enabler> = detail::dummy> +auto value_string(const T &value) -> decltype(to_string(value)) { + return to_string(value); +} + +/// This will only trigger for actual void type +template struct type_count { static const int value{0}; }; + +/// Set of overloads to get the type size of an object +template struct type_count::value>::type> { + static constexpr int value{std::tuple_size::value}; +}; +/// Type size for regular object types that do not look like a tuple +template +struct type_count< + T, + typename std::enable_if::value && !is_tuple_like::value && !std::is_void::value>::type> { + static constexpr int value{1}; +}; + +/// Type size of types that look like a vector +template struct type_count::value>::type> { + static constexpr int value{is_vector::value ? 
expected_max_vector_size + : type_count::value}; +}; + +/// This will only trigger for actual void type +template struct expected_count { static const int value{0}; }; + +/// For most types the number of expected items is 1 +template +struct expected_count::value && !std::is_void::value>::type> { + static constexpr int value{1}; +}; +/// number of expected items in a vector +template struct expected_count::value>::type> { + static constexpr int value{expected_max_vector_size}; +}; + +// Enumeration of the different supported categorizations of objects +enum class object_category : int { + integral_value = 2, + unsigned_integral = 4, + enumeration = 6, + boolean_value = 8, + floating_point = 10, + number_constructible = 12, + double_constructible = 14, + integer_constructible = 16, + vector_value = 30, + tuple_value = 35, + // string assignable or greater used in a condition so anything string like must come last + string_assignable = 50, + string_constructible = 60, + other = 200, + +}; + +/// some type that is not otherwise recognized +template struct classify_object { + static constexpr object_category value{object_category::other}; +}; + +/// Set of overloads to classify an object according to type +template +struct classify_object::value && std::is_signed::value && + !is_bool::value && !std::is_enum::value>::type> { + static constexpr object_category value{object_category::integral_value}; +}; + +/// Unsigned integers +template +struct classify_object< + T, + typename std::enable_if::value && std::is_unsigned::value && !is_bool::value>::type> { + static constexpr object_category value{object_category::unsigned_integral}; +}; + +/// Boolean values +template struct classify_object::value>::type> { + static constexpr object_category value{object_category::boolean_value}; +}; + +/// Floats +template struct classify_object::value>::type> { + static constexpr object_category value{object_category::floating_point}; +}; + +/// String and similar direct assignment 
+template +struct classify_object< + T, + typename std::enable_if::value && !std::is_integral::value && + std::is_assignable::value && !is_vector::value>::type> { + static constexpr object_category value{object_category::string_assignable}; +}; + +/// String and similar constructible and copy assignment +template +struct classify_object< + T, + typename std::enable_if::value && !std::is_integral::value && + !std::is_assignable::value && + std::is_constructible::value && !is_vector::value>::type> { + static constexpr object_category value{object_category::string_constructible}; +}; + +/// Enumerations +template struct classify_object::value>::type> { + static constexpr object_category value{object_category::enumeration}; +}; + +/// Handy helper to contain a bunch of checks that rule out many common types (integers, string like, floating point, +/// vectors, and enumerations +template struct uncommon_type { + using type = typename std::conditional::value && !std::is_integral::value && + !std::is_assignable::value && + !std::is_constructible::value && !is_vector::value && + !std::is_enum::value, + std::true_type, + std::false_type>::type; + static constexpr bool value = type::value; +}; + +/// Assignable from double or int +template +struct classify_object::value && type_count::value == 1 && + is_direct_constructible::value && + is_direct_constructible::value>::type> { + static constexpr object_category value{object_category::number_constructible}; +}; + +/// Assignable from int +template +struct classify_object::value && type_count::value == 1 && + !is_direct_constructible::value && + is_direct_constructible::value>::type> { + static constexpr object_category value{object_category::integer_constructible}; +}; + +/// Assignable from double +template +struct classify_object::value && type_count::value == 1 && + is_direct_constructible::value && + !is_direct_constructible::value>::type> { + static constexpr object_category value{object_category::double_constructible}; 
+}; + +/// Tuple type +template +struct classify_object::value >= 2 && !is_vector::value) || + (is_tuple_like::value && uncommon_type::value && + !is_direct_constructible::value && + !is_direct_constructible::value)>::type> { + static constexpr object_category value{object_category::tuple_value}; +}; + +/// Vector type +template struct classify_object::value>::type> { + static constexpr object_category value{object_category::vector_value}; +}; + +// Type name print + +/// Was going to be based on +/// http://stackoverflow.com/questions/1055452/c-get-name-of-type-in-template +/// But this is cleaner and works better in this case + +template ::value == object_category::integral_value || + classify_object::value == object_category::integer_constructible, + detail::enabler> = detail::dummy> +constexpr const char *type_name() { + return "INT"; +} + +template ::value == object_category::unsigned_integral, detail::enabler> = detail::dummy> +constexpr const char *type_name() { + return "UINT"; +} + +template ::value == object_category::floating_point || + classify_object::value == object_category::number_constructible || + classify_object::value == object_category::double_constructible, + detail::enabler> = detail::dummy> +constexpr const char *type_name() { + return "FLOAT"; +} + +/// Print name for enumeration types +template ::value == object_category::enumeration, detail::enabler> = detail::dummy> +constexpr const char *type_name() { + return "ENUM"; +} + +/// Print name for enumeration types +template ::value == object_category::boolean_value, detail::enabler> = detail::dummy> +constexpr const char *type_name() { + return "BOOLEAN"; +} + +/// Print for all other types +template ::value >= object_category::string_assignable, detail::enabler> = detail::dummy> +constexpr const char *type_name() { + return "TEXT"; +} + +/// Print name for single element tuple types +template ::value == object_category::tuple_value && type_count::value == 1, + detail::enabler> = 
detail::dummy> +inline std::string type_name() { + return type_name::type>(); +} + +/// Empty string if the index > tuple size +template +inline typename std::enable_if::value, std::string>::type tuple_name() { + return std::string{}; +} + +/// Recursively generate the tuple type name +template + inline typename std::enable_if < I::value, std::string>::type tuple_name() { + std::string str = std::string(type_name::type>()) + ',' + tuple_name(); + if(str.back() == ',') + str.pop_back(); + return str; +} + +/// Print type name for tuples with 2 or more elements +template ::value == object_category::tuple_value && type_count::value >= 2, + detail::enabler> = detail::dummy> +std::string type_name() { + auto tname = std::string(1, '[') + tuple_name(); + tname.push_back(']'); + return tname; +} + +/// This one should not be used normally, since vector types print the internal type +template ::value == object_category::vector_value, detail::enabler> = detail::dummy> +inline std::string type_name() { + return type_name(); +} + +// Lexical cast + +/// Convert a flag into an integer value typically binary flags +inline std::int64_t to_flag_value(std::string val) { + static const std::string trueString("true"); + static const std::string falseString("false"); + if(val == trueString) { + return 1; + } + if(val == falseString) { + return -1; + } + val = detail::to_lower(val); + std::int64_t ret; + if(val.size() == 1) { + if(val[0] >= '1' && val[0] <= '9') { + return (static_cast(val[0]) - '0'); + } + switch(val[0]) { + case '0': + case 'f': + case 'n': + case '-': + ret = -1; + break; + case 't': + case 'y': + case '+': + ret = 1; + break; + default: + throw std::invalid_argument("unrecognized character"); + } + return ret; + } + if(val == trueString || val == "on" || val == "yes" || val == "enable") { + ret = 1; + } else if(val == falseString || val == "off" || val == "no" || val == "disable") { + ret = -1; + } else { + ret = std::stoll(val); + } + return ret; +} + +/// Signed 
integers +template ::value == object_category::integral_value, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + try { + std::size_t n = 0; + std::int64_t output_ll = std::stoll(input, &n, 0); + output = static_cast(output_ll); + return n == input.size() && static_cast(output) == output_ll; + } catch(const std::invalid_argument &) { + return false; + } catch(const std::out_of_range &) { + return false; + } +} + +/// Unsigned integers +template ::value == object_category::unsigned_integral, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + if(!input.empty() && input.front() == '-') + return false; // std::stoull happily converts negative values to junk without any errors. + + try { + std::size_t n = 0; + std::uint64_t output_ll = std::stoull(input, &n, 0); + output = static_cast(output_ll); + return n == input.size() && static_cast(output) == output_ll; + } catch(const std::invalid_argument &) { + return false; + } catch(const std::out_of_range &) { + return false; + } +} + +/// Boolean values +template ::value == object_category::boolean_value, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + try { + auto out = to_flag_value(input); + output = (out > 0); + return true; + } catch(const std::invalid_argument &) { + return false; + } catch(const std::out_of_range &) { + // if the number is out of the range of a 64 bit value then it is still a number and for this purpose is still + // valid all we care about the sign + output = (input[0] != '-'); + return true; + } +} + +/// Floats +template ::value == object_category::floating_point, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + try { + std::size_t n = 0; + output = static_cast(std::stold(input, &n)); + return n == input.size(); + } catch(const std::invalid_argument &) { + return false; + } catch(const std::out_of_range &) { + return false; 
+ } +} + +/// String and similar direct assignment +template ::value == object_category::string_assignable, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + output = input; + return true; +} + +/// String and similar constructible and copy assignment +template < + typename T, + enable_if_t::value == object_category::string_constructible, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + output = T(input); + return true; +} + +/// Enumerations +template ::value == object_category::enumeration, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + typename std::underlying_type::type val; + bool retval = detail::lexical_cast(input, val); + if(!retval) { + return false; + } + output = static_cast(val); + return true; +} + +/// Assignable from double or int +template < + typename T, + enable_if_t::value == object_category::number_constructible, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + int val; + if(lexical_cast(input, val)) { + output = T(val); + return true; + } else { + double dval; + if(lexical_cast(input, dval)) { + output = T{dval}; + return true; + } + } + return from_stream(input, output); +} + +/// Assignable from int +template < + typename T, + enable_if_t::value == object_category::integer_constructible, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + int val; + if(lexical_cast(input, val)) { + output = T(val); + return true; + } + return from_stream(input, output); +} + +/// Assignable from double +template < + typename T, + enable_if_t::value == object_category::double_constructible, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + double val; + if(lexical_cast(input, val)) { + output = T{val}; + return true; + } + return from_stream(input, output); +} + +/// Non-string parsable by a stream 
+template ::value == object_category::other, detail::enabler> = detail::dummy> +bool lexical_cast(const std::string &input, T &output) { + static_assert(is_istreamable::value, + "option object type must have a lexical cast overload or streaming input operator(>>) defined, if it " + "is convertible from another type use the add_option(...) with XC being the known type"); + return from_stream(input, output); +} + +/// Assign a value through lexical cast operations +template < + typename T, + typename XC, + enable_if_t::value && (classify_object::value == object_category::string_assignable || + classify_object::value == object_category::string_constructible), + detail::enabler> = detail::dummy> +bool lexical_assign(const std::string &input, T &output) { + return lexical_cast(input, output); +} + +/// Assign a value through lexical cast operations +template ::value && classify_object::value != object_category::string_assignable && + classify_object::value != object_category::string_constructible, + detail::enabler> = detail::dummy> +bool lexical_assign(const std::string &input, T &output) { + if(input.empty()) { + output = T{}; + return true; + } + return lexical_cast(input, output); +} + +/// Assign a value converted from a string in lexical cast to the output value directly +template < + typename T, + typename XC, + enable_if_t::value && std::is_assignable::value, detail::enabler> = detail::dummy> +bool lexical_assign(const std::string &input, T &output) { + XC val{}; + bool parse_result = (!input.empty()) ? lexical_cast(input, val) : true; + if(parse_result) { + output = val; + } + return parse_result; +} + +/// Assign a value from a lexical cast through constructing a value and move assigning it +template ::value && !std::is_assignable::value && + std::is_move_assignable::value, + detail::enabler> = detail::dummy> +bool lexical_assign(const std::string &input, T &output) { + XC val{}; + bool parse_result = input.empty() ? 
true : lexical_cast(input, val); + if(parse_result) { + output = T(val); // use () form of constructor to allow some implicit conversions + } + return parse_result; +} +/// Lexical conversion if there is only one element +template < + typename T, + typename XC, + enable_if_t::value && !is_tuple_like::value && !is_vector::value && !is_vector::value, + detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + return lexical_assign(strings[0], output); +} + +/// Lexical conversion if there is only one element but the conversion type is for two call a two element constructor +template ::value == 1 && type_count::value == 2, detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + typename std::tuple_element<0, XC>::type v1; + typename std::tuple_element<1, XC>::type v2; + bool retval = lexical_assign(strings[0], v1); + if(strings.size() > 1) { + retval = retval && lexical_assign(strings[1], v2); + } + if(retval) { + output = T{v1, v2}; + } + return retval; +} + +/// Lexical conversion of a vector types +template ::value == expected_max_vector_size && + expected_count::value == expected_max_vector_size && type_count::value == 1, + detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + output.clear(); + output.reserve(strings.size()); + for(const auto &elem : strings) { + + output.emplace_back(); + bool retval = lexical_assign(elem, output.back()); + if(!retval) { + return false; + } + } + return (!output.empty()); +} + +/// Lexical conversion of a vector types with type size of two +template ::value == expected_max_vector_size && + expected_count::value == expected_max_vector_size && type_count::value == 2, + detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + output.clear(); + for(std::size_t ii = 0; ii < strings.size(); ii += 2) { + + typename std::tuple_element<0, typename 
XC::value_type>::type v1; + typename std::tuple_element<1, typename XC::value_type>::type v2; + bool retval = lexical_assign(strings[ii], v1); + if(strings.size() > ii + 1) { + retval = retval && lexical_assign(strings[ii + 1], v2); + } + if(retval) { + output.emplace_back(v1, v2); + } else { + return false; + } + } + return (!output.empty()); +} + +/// Conversion to a vector type using a particular single type as the conversion type +template ::value == expected_max_vector_size) && (expected_count::value == 1) && + (type_count::value == 1), + detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + bool retval = true; + output.clear(); + output.reserve(strings.size()); + for(const auto &elem : strings) { + + output.emplace_back(); + retval = retval && lexical_assign(elem, output.back()); + } + return (!output.empty()) && retval; +} +// This one is last since it can call other lexical_conversion functions +/// Lexical conversion if there is only one element but the conversion type is a vector +template ::value && !is_vector::value && is_vector::value, detail::enabler> = + detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + + if(strings.size() > 1 || (!strings.empty() && !(strings.front().empty()))) { + XC val; + auto retval = lexical_conversion(strings, val); + output = T{val}; + return retval; + } + output = T{}; + return true; +} + +/// function template for converting tuples if the static Index is greater than the tuple size +template +inline typename std::enable_if= type_count::value, bool>::type tuple_conversion(const std::vector &, + T &) { + return true; +} +/// Tuple conversion operation +template + inline typename std::enable_if < + I::value, bool>::type tuple_conversion(const std::vector &strings, T &output) { + bool retval = true; + if(strings.size() > I) { + retval = retval && lexical_assign::type, + typename std::conditional::value, + typename std::tuple_element::type, + 
XC>::type>(strings[I], std::get(output)); + } + retval = retval && tuple_conversion(strings, output); + return retval; +} + +/// Conversion for tuples +template ::value, detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + static_assert( + !is_tuple_like::value || type_count::value == type_count::value, + "if the conversion type is defined as a tuple it must be the same size as the type you are converting to"); + return tuple_conversion(strings, output); +} + +/// Lexical conversion of a vector types with type_size >2 +template ::value == expected_max_vector_size && + expected_count::value == expected_max_vector_size && (type_count::value > 2), + detail::enabler> = detail::dummy> +bool lexical_conversion(const std::vector &strings, T &output) { + bool retval = true; + output.clear(); + std::vector temp; + std::size_t ii = 0; + std::size_t icount = 0; + std::size_t xcm = type_count::value; + while(ii < strings.size()) { + temp.push_back(strings[ii]); + ++ii; + ++icount; + if(icount == xcm || temp.back().empty()) { + if(static_cast(xcm) == expected_max_vector_size) { + temp.pop_back(); + } + output.emplace_back(); + retval = retval && lexical_conversion(temp, output.back()); + temp.clear(); + if(!retval) { + return false; + } + icount = 0; + } + } + return retval; +} +/// Sum a vector of flag representations +/// The flag vector produces a series of strings in a vector, simple true is represented by a "1", simple false is +/// by +/// "-1" an if numbers are passed by some fashion they are captured as well so the function just checks for the most +/// common true and false strings then uses stoll to convert the rest for summing +template ::value && std::is_unsigned::value, detail::enabler> = detail::dummy> +void sum_flag_vector(const std::vector &flags, T &output) { + std::int64_t count{0}; + for(auto &flag : flags) { + count += detail::to_flag_value(flag); + } + output = (count > 0) ? 
static_cast(count) : T{0}; +} + +/// Sum a vector of flag representations +/// The flag vector produces a series of strings in a vector, simple true is represented by a "1", simple false is +/// by +/// "-1" an if numbers are passed by some fashion they are captured as well so the function just checks for the most +/// common true and false strings then uses stoll to convert the rest for summing +template ::value && std::is_signed::value, detail::enabler> = detail::dummy> +void sum_flag_vector(const std::vector &flags, T &output) { + std::int64_t count{0}; + for(auto &flag : flags) { + count += detail::to_flag_value(flag); + } + output = static_cast(count); +} + +} // namespace detail +} // namespace CLI + +// From Split.hpp: + +namespace CLI { +namespace detail { + +// Returns false if not a short option. Otherwise, sets opt name and rest and returns true +inline bool split_short(const std::string ¤t, std::string &name, std::string &rest) { + if(current.size() > 1 && current[0] == '-' && valid_first_char(current[1])) { + name = current.substr(1, 1); + rest = current.substr(2); + return true; + } + return false; +} + +// Returns false if not a long option. Otherwise, sets opt name and other side of = and returns true +inline bool split_long(const std::string ¤t, std::string &name, std::string &value) { + if(current.size() > 2 && current.substr(0, 2) == "--" && valid_first_char(current[2])) { + auto loc = current.find_first_of('='); + if(loc != std::string::npos) { + name = current.substr(2, loc - 2); + value = current.substr(loc + 1); + } else { + name = current.substr(2); + value = ""; + } + return true; + } + return false; +} + +// Returns false if not a windows style option. 
Otherwise, sets opt name and value and returns true +inline bool split_windows_style(const std::string ¤t, std::string &name, std::string &value) { + if(current.size() > 1 && current[0] == '/' && valid_first_char(current[1])) { + auto loc = current.find_first_of(':'); + if(loc != std::string::npos) { + name = current.substr(1, loc - 1); + value = current.substr(loc + 1); + } else { + name = current.substr(1); + value = ""; + } + return true; + } + return false; +} + +// Splits a string into multiple long and short names +inline std::vector split_names(std::string current) { + std::vector output; + std::size_t val; + while((val = current.find(",")) != std::string::npos) { + output.push_back(trim_copy(current.substr(0, val))); + current = current.substr(val + 1); + } + output.push_back(trim_copy(current)); + return output; +} + +/// extract default flag values either {def} or starting with a ! +inline std::vector> get_default_flag_values(const std::string &str) { + std::vector flags = split_names(str); + flags.erase(std::remove_if(flags.begin(), + flags.end(), + [](const std::string &name) { + return ((name.empty()) || (!(((name.find_first_of('{') != std::string::npos) && + (name.back() == '}')) || + (name[0] == '!')))); + }), + flags.end()); + std::vector> output; + output.reserve(flags.size()); + for(auto &flag : flags) { + auto def_start = flag.find_first_of('{'); + std::string defval = "false"; + if((def_start != std::string::npos) && (flag.back() == '}')) { + defval = flag.substr(def_start + 1); + defval.pop_back(); + flag.erase(def_start, std::string::npos); + } + flag.erase(0, flag.find_first_not_of("-!")); + output.emplace_back(flag, defval); + } + return output; +} + +/// Get a vector of short names, one of long names, and a single name +inline std::tuple, std::vector, std::string> +get_names(const std::vector &input) { + + std::vector short_names; + std::vector long_names; + std::string pos_name; + + for(std::string name : input) { + if(name.length() == 0) 
{ + continue; + } + if(name.length() > 1 && name[0] == '-' && name[1] != '-') { + if(name.length() == 2 && valid_first_char(name[1])) + short_names.emplace_back(1, name[1]); + else + throw BadNameString::OneCharName(name); + } else if(name.length() > 2 && name.substr(0, 2) == "--") { + name = name.substr(2); + if(valid_name_string(name)) + long_names.push_back(name); + else + throw BadNameString::BadLongName(name); + } else if(name == "-" || name == "--") { + throw BadNameString::DashesOnly(name); + } else { + if(pos_name.length() > 0) + throw BadNameString::MultiPositionalNames(name); + pos_name = name; + } + } + + return std::tuple, std::vector, std::string>( + short_names, long_names, pos_name); +} + +} // namespace detail +} // namespace CLI + +// From ConfigFwd.hpp: + +namespace CLI { + +class App; + +/// Holds values to load into Options +struct ConfigItem { + /// This is the list of parents + std::vector parents{}; + + /// This is the name + std::string name{}; + + /// Listing of inputs + std::vector inputs{}; + + /// The list of parents and name joined by "." + std::string fullname() const { + std::vector tmp = parents; + tmp.emplace_back(name); + return detail::join(tmp, "."); + } +}; + +/// This class provides a converter for configuration files. 
+class Config { + protected: + std::vector items{}; + + public: + /// Convert an app into a configuration + virtual std::string to_config(const App *, bool, bool, std::string) const = 0; + + /// Convert a configuration into an app + virtual std::vector from_config(std::istream &) const = 0; + + /// Get a flag value + virtual std::string to_flag(const ConfigItem &item) const { + if(item.inputs.size() == 1) { + return item.inputs.at(0); + } + throw ConversionError::TooManyInputsFlag(item.fullname()); + } + + /// Parse a config file, throw an error (ParseError:ConfigParseError or FileError) on failure + std::vector from_file(const std::string &name) { + std::ifstream input{name}; + if(!input.good()) + throw FileError::Missing(name); + + return from_config(input); + } + + /// Virtual destructor + virtual ~Config() = default; +}; + +/// This converter works with INI/TOML files; to write proper TOML files use ConfigTOML +class ConfigBase : public Config { + protected: + /// the character used for comments + char commentChar = ';'; + /// the character used to start an array '\0' is a default to not use + char arrayStart = '\0'; + /// the character used to end an array '\0' is a default to not use + char arrayEnd = '\0'; + /// the character used to separate elements in an array + char arraySeparator = ' '; + /// the character used separate the name from the value + char valueDelimiter = '='; + + public: + std::string + to_config(const App * /*app*/, bool default_also, bool write_description, std::string prefix) const override; + + std::vector from_config(std::istream &input) const override; + /// Specify the configuration for comment characters + ConfigBase *comment(char cchar) { + commentChar = cchar; + return this; + } + /// Specify the start and end characters for an array + ConfigBase *arrayBounds(char aStart, char aEnd) { + arrayStart = aStart; + arrayEnd = aEnd; + return this; + } + /// Specify the delimiter character for an array + ConfigBase *arrayDelimiter(char 
aSep) { + arraySeparator = aSep; + return this; + } + /// Specify the delimiter between a name and value + ConfigBase *valueSeparator(char vSep) { + valueDelimiter = vSep; + return this; + } +}; + +/// the default Config is the INI file format +using ConfigINI = ConfigBase; + +/// ConfigTOML generates a TOML compliant output +class ConfigTOML : public ConfigINI { + + public: + ConfigTOML() { + commentChar = '#'; + arrayStart = '['; + arrayEnd = ']'; + arraySeparator = ','; + valueDelimiter = '='; + } +}; +} // namespace CLI + +// From Validators.hpp: + +namespace CLI { + +class Option; + +/// @defgroup validator_group Validators + +/// @brief Some validators that are provided +/// +/// These are simple `std::string(const std::string&)` validators that are useful. They return +/// a string if the validation fails. A custom struct is provided, as well, with the same user +/// semantics, but with the ability to provide a new type name. +/// @{ + +/// +class Validator { + protected: + /// This is the description function, if empty the description_ will be used + std::function desc_function_{[]() { return std::string{}; }}; + + /// This is the base function that is to be called. + /// Returns a string error message if validation fails. 
+ std::function func_{[](std::string &) { return std::string{}; }}; + /// The name for search purposes of the Validator + std::string name_{}; + /// A Validator will only apply to an indexed value (-1 is all elements) + int application_index_ = -1; + /// Enable for Validator to allow it to be disabled if need be + bool active_{true}; + /// specify that a validator should not modify the input + bool non_modifying_{false}; + + public: + Validator() = default; + /// Construct a Validator with just the description string + explicit Validator(std::string validator_desc) : desc_function_([validator_desc]() { return validator_desc; }) {} + /// Construct Validator from basic information + Validator(std::function op, std::string validator_desc, std::string validator_name = "") + : desc_function_([validator_desc]() { return validator_desc; }), func_(std::move(op)), + name_(std::move(validator_name)) {} + /// Set the Validator operation function + Validator &operation(std::function op) { + func_ = std::move(op); + return *this; + } + /// This is the required operator for a Validator - provided to help + /// users (CLI11 uses the member `func` directly) + std::string operator()(std::string &str) const { + std::string retstring; + if(active_) { + if(non_modifying_) { + std::string value = str; + retstring = func_(value); + } else { + retstring = func_(str); + } + } + return retstring; + } + + /// This is the required operator for a Validator - provided to help + /// users (CLI11 uses the member `func` directly) + std::string operator()(const std::string &str) const { + std::string value = str; + return (active_) ? 
func_(value) : std::string{}; + } + + /// Specify the type string + Validator &description(std::string validator_desc) { + desc_function_ = [validator_desc]() { return validator_desc; }; + return *this; + } + /// Specify the type string + Validator description(std::string validator_desc) const { + Validator newval(*this); + newval.desc_function_ = [validator_desc]() { return validator_desc; }; + return newval; + } + /// Generate type description information for the Validator + std::string get_description() const { + if(active_) { + return desc_function_(); + } + return std::string{}; + } + /// Specify the type string + Validator &name(std::string validator_name) { + name_ = std::move(validator_name); + return *this; + } + /// Specify the type string + Validator name(std::string validator_name) const { + Validator newval(*this); + newval.name_ = std::move(validator_name); + return newval; + } + /// Get the name of the Validator + const std::string &get_name() const { return name_; } + /// Specify whether the Validator is active or not + Validator &active(bool active_val = true) { + active_ = active_val; + return *this; + } + /// Specify whether the Validator is active or not + Validator active(bool active_val = true) const { + Validator newval(*this); + newval.active_ = active_val; + return newval; + } + + /// Specify whether the Validator can be modifying or not + Validator &non_modifying(bool no_modify = true) { + non_modifying_ = no_modify; + return *this; + } + /// Specify the application index of a validator + Validator &application_index(int app_index) { + application_index_ = app_index; + return *this; + } + /// Specify the application index of a validator + Validator application_index(int app_index) const { + Validator newval(*this); + newval.application_index_ = app_index; + return newval; + } + /// Get the current value of the application index + int get_application_index() const { return application_index_; } + /// Get a boolean if the validator is active 
+ bool get_active() const { return active_; } + + /// Get a boolean if the validator is allowed to modify the input returns true if it can modify the input + bool get_modifying() const { return !non_modifying_; } + + /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the + /// same. + Validator operator&(const Validator &other) const { + Validator newval; + + newval._merge_description(*this, other, " AND "); + + // Give references (will make a copy in lambda function) + const std::function &f1 = func_; + const std::function &f2 = other.func_; + + newval.func_ = [f1, f2](std::string &input) { + std::string s1 = f1(input); + std::string s2 = f2(input); + if(!s1.empty() && !s2.empty()) + return std::string("(") + s1 + ") AND (" + s2 + ")"; + else + return s1 + s2; + }; + + newval.active_ = (active_ & other.active_); + newval.application_index_ = application_index_; + return newval; + } + + /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the + /// same. + Validator operator|(const Validator &other) const { + Validator newval; + + newval._merge_description(*this, other, " OR "); + + // Give references (will make a copy in lambda function) + const std::function &f1 = func_; + const std::function &f2 = other.func_; + + newval.func_ = [f1, f2](std::string &input) { + std::string s1 = f1(input); + std::string s2 = f2(input); + if(s1.empty() || s2.empty()) + return std::string(); + + return std::string("(") + s1 + ") OR (" + s2 + ")"; + }; + newval.active_ = (active_ & other.active_); + newval.application_index_ = application_index_; + return newval; + } + + /// Create a validator that fails when a given validator succeeds + Validator operator!() const { + Validator newval; + const std::function &dfunc1 = desc_function_; + newval.desc_function_ = [dfunc1]() { + auto str = dfunc1(); + return (!str.empty()) ? 
std::string("NOT ") + str : std::string{}; + }; + // Give references (will make a copy in lambda function) + const std::function &f1 = func_; + + newval.func_ = [f1, dfunc1](std::string &test) -> std::string { + std::string s1 = f1(test); + if(s1.empty()) { + return std::string("check ") + dfunc1() + " succeeded improperly"; + } + return std::string{}; + }; + newval.active_ = active_; + newval.application_index_ = application_index_; + return newval; + } + + private: + void _merge_description(const Validator &val1, const Validator &val2, const std::string &merger) { + + const std::function &dfunc1 = val1.desc_function_; + const std::function &dfunc2 = val2.desc_function_; + + desc_function_ = [=]() { + std::string f1 = dfunc1(); + std::string f2 = dfunc2(); + if((f1.empty()) || (f2.empty())) { + return f1 + f2; + } + return std::string(1, '(') + f1 + ')' + merger + '(' + f2 + ')'; + }; + } +}; // namespace CLI + +/// Class wrapping some of the accessors of Validator +class CustomValidator : public Validator { + public: +}; +// The implementation of the built in validators is using the Validator class; +// the user is only expected to use the const (static) versions (since there's no setup). +// Therefore, this is in detail. 
+namespace detail { + +/// CLI enumeration of different file types +enum class path_type { nonexistent, file, directory }; + +#if defined CLI11_HAS_FILESYSTEM && CLI11_HAS_FILESYSTEM > 0 +/// get the type of the path from a file name +inline path_type check_path(const char *file) noexcept { + std::error_code ec; + auto stat = std::filesystem::status(file, ec); + if(ec) { + return path_type::nonexistent; + } + switch(stat.type()) { + case std::filesystem::file_type::none: + case std::filesystem::file_type::not_found: + return path_type::nonexistent; + case std::filesystem::file_type::directory: + return path_type::directory; + case std::filesystem::file_type::symlink: + case std::filesystem::file_type::block: + case std::filesystem::file_type::character: + case std::filesystem::file_type::fifo: + case std::filesystem::file_type::socket: + case std::filesystem::file_type::regular: + case std::filesystem::file_type::unknown: + default: + return path_type::file; + } +} +#else +/// get the type of the path from a file name +inline path_type check_path(const char *file) noexcept { +#if defined(_MSC_VER) + struct __stat64 buffer; + if(_stat64(file, &buffer) == 0) { + return ((buffer.st_mode & S_IFDIR) != 0) ? path_type::directory : path_type::file; + } +#else + struct stat buffer; + if(stat(file, &buffer) == 0) { + return ((buffer.st_mode & S_IFDIR) != 0) ? 
path_type::directory : path_type::file; + } +#endif + return path_type::nonexistent; +} +#endif +/// Check for an existing file (returns error message if check fails) +class ExistingFileValidator : public Validator { + public: + ExistingFileValidator() : Validator("FILE") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if(path_result == path_type::nonexistent) { + return "File does not exist: " + filename; + } + if(path_result == path_type::directory) { + return "File is actually a directory: " + filename; + } + return std::string(); + }; + } +}; + +/// Check for an existing directory (returns error message if check fails) +class ExistingDirectoryValidator : public Validator { + public: + ExistingDirectoryValidator() : Validator("DIR") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if(path_result == path_type::nonexistent) { + return "Directory does not exist: " + filename; + } + if(path_result == path_type::file) { + return "Directory is actually a file: " + filename; + } + return std::string(); + }; + } +}; + +/// Check for an existing path +class ExistingPathValidator : public Validator { + public: + ExistingPathValidator() : Validator("PATH(existing)") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if(path_result == path_type::nonexistent) { + return "Path does not exist: " + filename; + } + return std::string(); + }; + } +}; + +/// Check for an non-existing path +class NonexistentPathValidator : public Validator { + public: + NonexistentPathValidator() : Validator("PATH(non-existing)") { + func_ = [](std::string &filename) { + auto path_result = check_path(filename.c_str()); + if(path_result != path_type::nonexistent) { + return "Path already exists: " + filename; + } + return std::string(); + }; + } +}; + +/// Validate the given string is a legal ipv4 address +class IPV4Validator : public Validator { + public: + 
IPV4Validator() : Validator("IPV4") { + func_ = [](std::string &ip_addr) { + auto result = CLI::detail::split(ip_addr, '.'); + if(result.size() != 4) { + return std::string("Invalid IPV4 address must have four parts (") + ip_addr + ')'; + } + int num; + for(const auto &var : result) { + bool retval = detail::lexical_cast(var, num); + if(!retval) { + return std::string("Failed parsing number (") + var + ')'; + } + if(num < 0 || num > 255) { + return std::string("Each IP number must be between 0 and 255 ") + var; + } + } + return std::string(); + }; + } +}; + +/// Validate the argument is a number and greater than 0 +class PositiveNumber : public Validator { + public: + PositiveNumber() : Validator("POSITIVE") { + func_ = [](std::string &number_str) { + double number; + if(!detail::lexical_cast(number_str, number)) { + return std::string("Failed parsing number: (") + number_str + ')'; + } + if(number <= 0) { + return std::string("Number less or equal to 0: (") + number_str + ')'; + } + return std::string(); + }; + } +}; +/// Validate the argument is a number and greater than or equal to 0 +class NonNegativeNumber : public Validator { + public: + NonNegativeNumber() : Validator("NONNEGATIVE") { + func_ = [](std::string &number_str) { + double number; + if(!detail::lexical_cast(number_str, number)) { + return std::string("Failed parsing number: (") + number_str + ')'; + } + if(number < 0) { + return std::string("Number less than 0: (") + number_str + ')'; + } + return std::string(); + }; + } +}; + +/// Validate the argument is a number +class Number : public Validator { + public: + Number() : Validator("NUMBER") { + func_ = [](std::string &number_str) { + double number; + if(!detail::lexical_cast(number_str, number)) { + return std::string("Failed parsing as a number (") + number_str + ')'; + } + return std::string(); + }; + } +}; + +} // namespace detail + +// Static is not needed here, because global const implies static. 
+ +/// Check for existing file (returns error message if check fails) +const detail::ExistingFileValidator ExistingFile; + +/// Check for an existing directory (returns error message if check fails) +const detail::ExistingDirectoryValidator ExistingDirectory; + +/// Check for an existing path +const detail::ExistingPathValidator ExistingPath; + +/// Check for an non-existing path +const detail::NonexistentPathValidator NonexistentPath; + +/// Check for an IP4 address +const detail::IPV4Validator ValidIPV4; + +/// Check for a positive number +const detail::PositiveNumber PositiveNumber; + +/// Check for a non-negative number +const detail::NonNegativeNumber NonNegativeNumber; + +/// Check for a number +const detail::Number Number; + +/// Produce a range (factory). Min and max are inclusive. +class Range : public Validator { + public: + /// This produces a range with min and max inclusive. + /// + /// Note that the constructor is templated, but the struct is not, so C++17 is not + /// needed to provide nice syntax for Range(a,b). + template Range(T min, T max) { + std::stringstream out; + out << detail::type_name() << " in [" << min << " - " << max << "]"; + description(out.str()); + + func_ = [min, max](std::string &input) { + T val; + bool converted = detail::lexical_cast(input, val); + if((!converted) || (val < min || val > max)) + return std::string("Value ") + input + " not in range " + std::to_string(min) + " to " + + std::to_string(max); + + return std::string(); + }; + } + + /// Range of one value is 0 to value + template explicit Range(T max) : Range(static_cast(0), max) {} +}; + +/// Produce a bounded range (factory). Min and max are inclusive. +class Bound : public Validator { + public: + /// This bounds a value with min and max inclusive. + /// + /// Note that the constructor is templated, but the struct is not, so C++17 is not + /// needed to provide nice syntax for Range(a,b). 
+ template Bound(T min, T max) { + std::stringstream out; + out << detail::type_name() << " bounded to [" << min << " - " << max << "]"; + description(out.str()); + + func_ = [min, max](std::string &input) { + T val; + bool converted = detail::lexical_cast(input, val); + if(!converted) { + return std::string("Value ") + input + " could not be converted"; + } + if(val < min) + input = detail::to_string(min); + else if(val > max) + input = detail::to_string(max); + + return std::string{}; + }; + } + + /// Range of one value is 0 to value + template explicit Bound(T max) : Bound(static_cast(0), max) {} +}; + +namespace detail { +template ::type>::value, detail::enabler> = detail::dummy> +auto smart_deref(T value) -> decltype(*value) { + return *value; +} + +template < + typename T, + enable_if_t::type>::value, detail::enabler> = detail::dummy> +typename std::remove_reference::type &smart_deref(T &value) { + return value; +} +/// Generate a string representation of a set +template std::string generate_set(const T &set) { + using element_t = typename detail::element_type::type; + using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair + std::string out(1, '{'); + out.append(detail::join( + detail::smart_deref(set), + [](const iteration_type_t &v) { return detail::pair_adaptor::first(v); }, + ",")); + out.push_back('}'); + return out; +} + +/// Generate a string representation of a map +template std::string generate_map(const T &map, bool key_only = false) { + using element_t = typename detail::element_type::type; + using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair + std::string out(1, '{'); + out.append(detail::join( + detail::smart_deref(map), + [key_only](const iteration_type_t &v) { + std::string res{detail::to_string(detail::pair_adaptor::first(v))}; + + if(!key_only) { + res.append("->"); + res += detail::to_string(detail::pair_adaptor::second(v)); + } + return res; + }, + 
",")); + out.push_back('}'); + return out; +} + +template struct has_find { + template + static auto test(int) -> decltype(std::declval().find(std::declval()), std::true_type()); + template static auto test(...) -> decltype(std::false_type()); + + static const auto value = decltype(test(0))::value; + using type = std::integral_constant; +}; + +/// A search function +template ::value, detail::enabler> = detail::dummy> +auto search(const T &set, const V &val) -> std::pair { + using element_t = typename detail::element_type::type; + auto &setref = detail::smart_deref(set); + auto it = std::find_if(std::begin(setref), std::end(setref), [&val](decltype(*std::begin(setref)) v) { + return (detail::pair_adaptor::first(v) == val); + }); + return {(it != std::end(setref)), it}; +} + +/// A search function that uses the built in find function +template ::value, detail::enabler> = detail::dummy> +auto search(const T &set, const V &val) -> std::pair { + auto &setref = detail::smart_deref(set); + auto it = setref.find(val); + return {(it != std::end(setref)), it}; +} + +/// A search function with a filter function +template +auto search(const T &set, const V &val, const std::function &filter_function) + -> std::pair { + using element_t = typename detail::element_type::type; + // do the potentially faster first search + auto res = search(set, val); + if((res.first) || (!(filter_function))) { + return res; + } + // if we haven't found it do the longer linear search with all the element translations + auto &setref = detail::smart_deref(set); + auto it = std::find_if(std::begin(setref), std::end(setref), [&](decltype(*std::begin(setref)) v) { + V a{detail::pair_adaptor::first(v)}; + a = filter_function(a); + return (a == val); + }); + return {(it != std::end(setref)), it}; +} + +// the following suggestion was made by Nikita Ofitserov(@himikof) +// done in templates to prevent compiler warnings on negation of unsigned numbers + +/// Do a check for overflow on signed numbers 
+template +inline typename std::enable_if::value, T>::type overflowCheck(const T &a, const T &b) { + if((a > 0) == (b > 0)) { + return ((std::numeric_limits::max)() / (std::abs)(a) < (std::abs)(b)); + } else { + return ((std::numeric_limits::min)() / (std::abs)(a) > -(std::abs)(b)); + } +} +/// Do a check for overflow on unsigned numbers +template +inline typename std::enable_if::value, T>::type overflowCheck(const T &a, const T &b) { + return ((std::numeric_limits::max)() / a < b); +} + +/// Performs a *= b; if it doesn't cause integer overflow. Returns false otherwise. +template typename std::enable_if::value, bool>::type checked_multiply(T &a, T b) { + if(a == 0 || b == 0 || a == 1 || b == 1) { + a *= b; + return true; + } + if(a == (std::numeric_limits::min)() || b == (std::numeric_limits::min)()) { + return false; + } + if(overflowCheck(a, b)) { + return false; + } + a *= b; + return true; +} + +/// Performs a *= b; if it doesn't equal infinity. Returns false otherwise. +template +typename std::enable_if::value, bool>::type checked_multiply(T &a, T b) { + T c = a * b; + if(std::isinf(c) && !std::isinf(a) && !std::isinf(b)) { + return false; + } + a = c; + return true; +} + +} // namespace detail +/// Verify items are in a set +class IsMember : public Validator { + public: + using filter_fn_t = std::function; + + /// This allows in-place construction using an initializer list + template + IsMember(std::initializer_list values, Args &&... args) + : IsMember(std::vector(values), std::forward(args)...) {} + + /// This checks to see if an item is in a set (empty function) + template explicit IsMember(T &&set) : IsMember(std::forward(set), nullptr) {} + + /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter + /// both sides of the comparison before computing the comparison. 
+ template explicit IsMember(T set, F filter_function) { + + // Get the type of the contained item - requires a container have ::value_type + // if the type does not have first_type and second_type, these are both value_type + using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed + using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map + + using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones + // (const char * to std::string) + + // Make a local copy of the filter function, using a std::function if not one already + std::function filter_fn = filter_function; + + // This is the type name for help, it will take the current version of the set contents + desc_function_ = [set]() { return detail::generate_set(detail::smart_deref(set)); }; + + // This is the function that validates + // It stores a copy of the set pointer-like, so shared_ptr will stay alive + func_ = [set, filter_fn](std::string &input) { + local_item_t b; + if(!detail::lexical_cast(input, b)) { + throw ValidationError(input); // name is added later + } + if(filter_fn) { + b = filter_fn(b); + } + auto res = detail::search(set, b, filter_fn); + if(res.first) { + // Make sure the version in the input string is identical to the one in the set + if(filter_fn) { + input = detail::value_string(detail::pair_adaptor::first(*(res.second))); + } + + // Return empty error string (success) + return std::string{}; + } + + // If you reach this point, the result was not found + std::string out(" not in "); + out += detail::generate_set(detail::smart_deref(set)); + return out; + }; + } + + /// You can pass in as many filter functions as you like, they nest (string only currently) + template + IsMember(T &&set, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) + : IsMember( + std::forward(set), + [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, + other...) 
{} +}; + +/// definition of the default transformation object +template using TransformPairs = std::vector>; + +/// Translate named items to other or a value set +class Transformer : public Validator { + public: + using filter_fn_t = std::function; + + /// This allows in-place construction + template + Transformer(std::initializer_list> values, Args &&... args) + : Transformer(TransformPairs(values), std::forward(args)...) {} + + /// direct map of std::string to std::string + template explicit Transformer(T &&mapping) : Transformer(std::forward(mapping), nullptr) {} + + /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter + /// both sides of the comparison before computing the comparison. + template explicit Transformer(T mapping, F filter_function) { + + static_assert(detail::pair_adaptor::type>::value, + "mapping must produce value pairs"); + // Get the type of the contained item - requires a container have ::value_type + // if the type does not have first_type and second_type, these are both value_type + using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed + using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map + using local_item_t = typename IsMemberType::type; // Will convert bad types to good ones + // (const char * to std::string) + + // Make a local copy of the filter function, using a std::function if not one already + std::function filter_fn = filter_function; + + // This is the type name for help, it will take the current version of the set contents + desc_function_ = [mapping]() { return detail::generate_map(detail::smart_deref(mapping)); }; + + func_ = [mapping, filter_fn](std::string &input) { + local_item_t b; + if(!detail::lexical_cast(input, b)) { + return std::string(); + // there is no possible way we can match anything in the mapping if we can't convert so just return + } + if(filter_fn) { + b = filter_fn(b); + 
} + auto res = detail::search(mapping, b, filter_fn); + if(res.first) { + input = detail::value_string(detail::pair_adaptor::second(*res.second)); + } + return std::string{}; + }; + } + + /// You can pass in as many filter functions as you like, they nest + template + Transformer(T &&mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) + : Transformer( + std::forward(mapping), + [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, + other...) {} +}; + +/// translate named items to other or a value set +class CheckedTransformer : public Validator { + public: + using filter_fn_t = std::function; + + /// This allows in-place construction + template + CheckedTransformer(std::initializer_list> values, Args &&... args) + : CheckedTransformer(TransformPairs(values), std::forward(args)...) {} + + /// direct map of std::string to std::string + template explicit CheckedTransformer(T mapping) : CheckedTransformer(std::move(mapping), nullptr) {} + + /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter + /// both sides of the comparison before computing the comparison. 
+ template explicit CheckedTransformer(T mapping, F filter_function) { + + static_assert(detail::pair_adaptor::type>::value, + "mapping must produce value pairs"); + // Get the type of the contained item - requires a container have ::value_type + // if the type does not have first_type and second_type, these are both value_type + using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed + using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map + using local_item_t = typename IsMemberType::type; // Will convert bad types to good ones + // (const char * to std::string) + using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair + + // Make a local copy of the filter function, using a std::function if not one already + std::function filter_fn = filter_function; + + auto tfunc = [mapping]() { + std::string out("value in "); + out += detail::generate_map(detail::smart_deref(mapping)) + " OR {"; + out += detail::join( + detail::smart_deref(mapping), + [](const iteration_type_t &v) { return detail::to_string(detail::pair_adaptor::second(v)); }, + ","); + out.push_back('}'); + return out; + }; + + desc_function_ = tfunc; + + func_ = [mapping, tfunc, filter_fn](std::string &input) { + local_item_t b; + bool converted = detail::lexical_cast(input, b); + if(converted) { + if(filter_fn) { + b = filter_fn(b); + } + auto res = detail::search(mapping, b, filter_fn); + if(res.first) { + input = detail::value_string(detail::pair_adaptor::second(*res.second)); + return std::string{}; + } + } + for(const auto &v : detail::smart_deref(mapping)) { + auto output_string = detail::value_string(detail::pair_adaptor::second(v)); + if(output_string == input) { + return std::string(); + } + } + + return "Check " + input + " " + tfunc() + " FAILED"; + }; + } + + /// You can pass in as many filter functions as you like, they nest + template + CheckedTransformer(T &&mapping, filter_fn_t 
filter_fn_1, filter_fn_t filter_fn_2, Args &&... other) + : CheckedTransformer( + std::forward(mapping), + [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, + other...) {} +}; + +/// Helper function to allow ignore_case to be passed to IsMember or Transform +inline std::string ignore_case(std::string item) { return detail::to_lower(item); } + +/// Helper function to allow ignore_underscore to be passed to IsMember or Transform +inline std::string ignore_underscore(std::string item) { return detail::remove_underscore(item); } + +/// Helper function to allow checks to ignore spaces to be passed to IsMember or Transform +inline std::string ignore_space(std::string item) { + item.erase(std::remove(std::begin(item), std::end(item), ' '), std::end(item)); + item.erase(std::remove(std::begin(item), std::end(item), '\t'), std::end(item)); + return item; +} + +/// Multiply a number by a factor using given mapping. +/// Can be used to write transforms for SIZE or DURATION inputs. +/// +/// Example: +/// With mapping = `{"b"->1, "kb"->1024, "mb"->1024*1024}` +/// one can recognize inputs like "100", "12kb", "100 MB", +/// that will be automatically transformed to 100, 14448, 104857600. +/// +/// Output number type matches the type in the provided mapping. +/// Therefore, if it is required to interpret real inputs like "0.42 s", +/// the mapping should be of a type or . +class AsNumberWithUnit : public Validator { + public: + /// Adjust AsNumberWithUnit behavior. + /// CASE_SENSITIVE/CASE_INSENSITIVE controls how units are matched. + /// UNIT_OPTIONAL/UNIT_REQUIRED throws ValidationError + /// if UNIT_REQUIRED is set and unit literal is not found. 
+ enum Options { + CASE_SENSITIVE = 0, + CASE_INSENSITIVE = 1, + UNIT_OPTIONAL = 0, + UNIT_REQUIRED = 2, + DEFAULT = CASE_INSENSITIVE | UNIT_OPTIONAL + }; + + template + explicit AsNumberWithUnit(std::map mapping, + Options opts = DEFAULT, + const std::string &unit_name = "UNIT") { + description(generate_description(unit_name, opts)); + validate_mapping(mapping, opts); + + // transform function + func_ = [mapping, opts](std::string &input) -> std::string { + Number num; + + detail::rtrim(input); + if(input.empty()) { + throw ValidationError("Input is empty"); + } + + // Find split position between number and prefix + auto unit_begin = input.end(); + while(unit_begin > input.begin() && std::isalpha(*(unit_begin - 1), std::locale())) { + --unit_begin; + } + + std::string unit{unit_begin, input.end()}; + input.resize(static_cast(std::distance(input.begin(), unit_begin))); + detail::trim(input); + + if(opts & UNIT_REQUIRED && unit.empty()) { + throw ValidationError("Missing mandatory unit"); + } + if(opts & CASE_INSENSITIVE) { + unit = detail::to_lower(unit); + } + + bool converted = detail::lexical_cast(input, num); + if(!converted) { + throw ValidationError(std::string("Value ") + input + " could not be converted to " + + detail::type_name()); + } + + if(unit.empty()) { + // No need to modify input if no unit passed + return {}; + } + + // find corresponding factor + auto it = mapping.find(unit); + if(it == mapping.end()) { + throw ValidationError(unit + + " unit not recognized. " + "Allowed values: " + + detail::generate_map(mapping, true)); + } + + // perform safe multiplication + bool ok = detail::checked_multiply(num, it->second); + if(!ok) { + throw ValidationError(detail::to_string(num) + " multiplied by " + unit + + " factor would cause number overflow. Use smaller value."); + } + input = detail::to_string(num); + + return {}; + }; + } + + private: + /// Check that mapping contains valid units. + /// Update mapping for CASE_INSENSITIVE mode. 
+ template static void validate_mapping(std::map &mapping, Options opts) { + for(auto &kv : mapping) { + if(kv.first.empty()) { + throw ValidationError("Unit must not be empty."); + } + if(!detail::isalpha(kv.first)) { + throw ValidationError("Unit must contain only letters."); + } + } + + // make all units lowercase if CASE_INSENSITIVE + if(opts & CASE_INSENSITIVE) { + std::map lower_mapping; + for(auto &kv : mapping) { + auto s = detail::to_lower(kv.first); + if(lower_mapping.count(s)) { + throw ValidationError(std::string("Several matching lowercase unit representations are found: ") + + s); + } + lower_mapping[detail::to_lower(kv.first)] = kv.second; + } + mapping = std::move(lower_mapping); + } + } + + /// Generate description like this: NUMBER [UNIT] + template static std::string generate_description(const std::string &name, Options opts) { + std::stringstream out; + out << detail::type_name() << ' '; + if(opts & UNIT_REQUIRED) { + out << name; + } else { + out << '[' << name << ']'; + } + return out.str(); + } +}; + +/// Converts a human-readable size string (with unit literal) to uin64_t size. +/// Example: +/// "100" => 100 +/// "1 b" => 100 +/// "10Kb" => 10240 // you can configure this to be interpreted as kilobyte (*1000) or kibibyte (*1024) +/// "10 KB" => 10240 +/// "10 kb" => 10240 +/// "10 kib" => 10240 // *i, *ib are always interpreted as *bibyte (*1024) +/// "10kb" => 10240 +/// "2 MB" => 2097152 +/// "2 EiB" => 2^61 // Units up to exibyte are supported +class AsSizeValue : public AsNumberWithUnit { + public: + using result_t = std::uint64_t; + + /// If kb_is_1000 is true, + /// interpret 'kb', 'k' as 1000 and 'kib', 'ki' as 1024 + /// (same applies to higher order units as well). + /// Otherwise, interpret all literals as factors of 1024. + /// The first option is formally correct, but + /// the second interpretation is more wide-spread + /// (see https://en.wikipedia.org/wiki/Binary_prefix). 
+ explicit AsSizeValue(bool kb_is_1000) : AsNumberWithUnit(get_mapping(kb_is_1000)) { + if(kb_is_1000) { + description("SIZE [b, kb(=1000b), kib(=1024b), ...]"); + } else { + description("SIZE [b, kb(=1024b), ...]"); + } + } + + private: + /// Get mapping + static std::map init_mapping(bool kb_is_1000) { + std::map m; + result_t k_factor = kb_is_1000 ? 1000 : 1024; + result_t ki_factor = 1024; + result_t k = 1; + result_t ki = 1; + m["b"] = 1; + for(std::string p : {"k", "m", "g", "t", "p", "e"}) { + k *= k_factor; + ki *= ki_factor; + m[p] = k; + m[p + "b"] = k; + m[p + "i"] = ki; + m[p + "ib"] = ki; + } + return m; + } + + /// Cache calculated mapping + static std::map get_mapping(bool kb_is_1000) { + if(kb_is_1000) { + static auto m = init_mapping(true); + return m; + } else { + static auto m = init_mapping(false); + return m; + } + } +}; + +namespace detail { +/// Split a string into a program name and command line arguments +/// the string is assumed to contain a file name followed by other arguments +/// the return value contains is a pair with the first argument containing the program name and the second +/// everything else. +inline std::pair split_program_name(std::string commandline) { + // try to determine the programName + std::pair vals; + trim(commandline); + auto esp = commandline.find_first_of(' ', 1); + while(detail::check_path(commandline.substr(0, esp).c_str()) != path_type::file) { + esp = commandline.find_first_of(' ', esp + 1); + if(esp == std::string::npos) { + // if we have reached the end and haven't found a valid file just assume the first argument is the + // program name + esp = commandline.find_first_of(' ', 1); + break; + } + } + vals.first = commandline.substr(0, esp); + rtrim(vals.first); + // strip the program name + vals.second = (esp != std::string::npos) ? 
commandline.substr(esp + 1) : std::string{}; + ltrim(vals.second); + return vals; +} + +} // namespace detail +/// @} + +} // namespace CLI + +// From FormatterFwd.hpp: + +namespace CLI { + +class Option; +class App; + +/// This enum signifies the type of help requested +/// +/// This is passed in by App; all user classes must accept this as +/// the second argument. + +enum class AppFormatMode { + Normal, ///< The normal, detailed help + All, ///< A fully expanded help + Sub, ///< Used when printed as part of expanded subcommand +}; + +/// This is the minimum requirements to run a formatter. +/// +/// A user can subclass this is if they do not care at all +/// about the structure in CLI::Formatter. +class FormatterBase { + protected: + /// @name Options + ///@{ + + /// The width of the first column + std::size_t column_width_{30}; + + /// @brief The required help printout labels (user changeable) + /// Values are Needs, Excludes, etc. + std::map labels_{}; + + ///@} + /// @name Basic + ///@{ + + public: + FormatterBase() = default; + FormatterBase(const FormatterBase &) = default; + FormatterBase(FormatterBase &&) = default; + + /// Adding a destructor in this form to work around bug in GCC 4.7 + virtual ~FormatterBase() noexcept {} // NOLINT(modernize-use-equals-default) + + /// This is the key method that puts together help + virtual std::string make_help(const App *, std::string, AppFormatMode) const = 0; + + ///@} + /// @name Setters + ///@{ + + /// Set the "REQUIRED" label + void label(std::string key, std::string val) { labels_[key] = val; } + + /// Set the column width + void column_width(std::size_t val) { column_width_ = val; } + + ///@} + /// @name Getters + ///@{ + + /// Get the current value of a name (REQUIRED, etc.) 
+ std::string get_label(std::string key) const { + if(labels_.find(key) == labels_.end()) + return key; + else + return labels_.at(key); + } + + /// Get the current column width + std::size_t get_column_width() const { return column_width_; } + + ///@} +}; + +/// This is a specialty override for lambda functions +class FormatterLambda final : public FormatterBase { + using funct_t = std::function; + + /// The lambda to hold and run + funct_t lambda_; + + public: + /// Create a FormatterLambda with a lambda function + explicit FormatterLambda(funct_t funct) : lambda_(std::move(funct)) {} + + /// Adding a destructor (mostly to make GCC 4.7 happy) + ~FormatterLambda() noexcept override {} // NOLINT(modernize-use-equals-default) + + /// This will simply call the lambda function + std::string make_help(const App *app, std::string name, AppFormatMode mode) const override { + return lambda_(app, name, mode); + } +}; + +/// This is the default Formatter for CLI11. It pretty prints help output, and is broken into quite a few +/// overridable methods, to be highly customizable with minimal effort. 
+class Formatter : public FormatterBase { + public: + Formatter() = default; + Formatter(const Formatter &) = default; + Formatter(Formatter &&) = default; + + /// @name Overridables + ///@{ + + /// This prints out a group of options with title + /// + virtual std::string make_group(std::string group, bool is_positional, std::vector opts) const; + + /// This prints out just the positionals "group" + virtual std::string make_positionals(const App *app) const; + + /// This prints out all the groups of options + std::string make_groups(const App *app, AppFormatMode mode) const; + + /// This prints out all the subcommands + virtual std::string make_subcommands(const App *app, AppFormatMode mode) const; + + /// This prints out a subcommand + virtual std::string make_subcommand(const App *sub) const; + + /// This prints out a subcommand in help-all + virtual std::string make_expanded(const App *sub) const; + + /// This prints out all the groups of options + virtual std::string make_footer(const App *app) const; + + /// This displays the description line + virtual std::string make_description(const App *app) const; + + /// This displays the usage line + virtual std::string make_usage(const App *app, std::string name) const; + + /// This puts everything together + std::string make_help(const App * /*app*/, std::string, AppFormatMode) const override; + + ///@} + /// @name Options + ///@{ + + /// This prints out an option help line, either positional or optional form + virtual std::string make_option(const Option *opt, bool is_positional) const { + std::stringstream out; + detail::format_help( + out, make_option_name(opt, is_positional) + make_option_opts(opt), make_option_desc(opt), column_width_); + return out.str(); + } + + /// @brief This is the name part of an option, Default: left column + virtual std::string make_option_name(const Option *, bool) const; + + /// @brief This is the options part of the name, Default: combined into left column + virtual std::string 
make_option_opts(const Option *) const; + + /// @brief This is the description. Default: Right column, on new line if left column too large + virtual std::string make_option_desc(const Option *) const; + + /// @brief This is used to print the name on the USAGE line + virtual std::string make_option_usage(const Option *opt) const; + + ///@} +}; + +} // namespace CLI + +// From Option.hpp: + +namespace CLI { + +using results_t = std::vector; +/// callback function definition +using callback_t = std::function; + +class Option; +class App; + +using Option_p = std::unique_ptr