From a4ac072780551257e5b48ad22107189ba7d274a4 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com> Date: Tue, 1 Dec 2020 14:07:42 -0800 Subject: [PATCH 01/27] Initial file structure for the resource tracer --- src/QirRuntime/lib/CMakeLists.txt | 2 +- src/QirRuntime/lib/Tracer/CMakeLists.txt | 27 ++++ src/QirRuntime/lib/Tracer/README.md | 5 + src/QirRuntime/lib/Tracer/tracer-qir.cpp | 52 +++++++ src/QirRuntime/lib/Tracer/tracer.cpp | 7 + src/QirRuntime/lib/Tracer/tracer.hpp | 16 +++ src/QirRuntime/test.py | 3 +- src/QirRuntime/test/CMakeLists.txt | 1 + src/QirRuntime/test/QIR-tracer/CMakeLists.txt | 77 ++++++++++ .../test/QIR-tracer/qir-test-trace.ll | 15 ++ .../test/QIR-tracer/qir-test-trace.qs | 19 +++ .../test/QIR-tracer/qir-tracer-core.qs | 21 +++ .../test/QIR-tracer/qir-tracer-driver.cpp | 13 ++ .../test/QIR-tracer/qir-tracer-target.qs | 135 ++++++++++++++++++ src/QirRuntime/test/unittests/CMakeLists.txt | 1 + src/QirRuntime/test/unittests/TracerTests.cpp | 9 ++ 16 files changed, 401 insertions(+), 2 deletions(-) create mode 100644 src/QirRuntime/lib/Tracer/CMakeLists.txt create mode 100644 src/QirRuntime/lib/Tracer/README.md create mode 100644 src/QirRuntime/lib/Tracer/tracer-qir.cpp create mode 100644 src/QirRuntime/lib/Tracer/tracer.cpp create mode 100644 src/QirRuntime/lib/Tracer/tracer.hpp create mode 100644 src/QirRuntime/test/QIR-tracer/CMakeLists.txt create mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-trace.ll create mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-trace.qs create mode 100644 src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs create mode 100644 src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp create mode 100644 src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs create mode 100644 src/QirRuntime/test/unittests/TracerTests.cpp diff --git a/src/QirRuntime/lib/CMakeLists.txt b/src/QirRuntime/lib/CMakeLists.txt index 3589d6b764e..8c8a30f8255 100644 --- a/src/QirRuntime/lib/CMakeLists.txt +++ b/src/QirRuntime/lib/CMakeLists.txt @@ -1,6 +1,6 @@ add_subdirectory(QIR) add_subdirectory(Simulators) - +add_subdirectory(Tracer) #=============================================================================== # Produce the qdk dynamic library diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt new file mode 100644 index 00000000000..7c9aaa08fcc --- /dev/null +++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt @@ -0,0 +1,27 @@ +set(component_name "tracer") + +# the tracer implements its own management of qubits/results but reuses the rest of the qir-runtime +# TODO: consider splitting qir-rt into two libs +set(source_files + "../QIR/arrays.cpp" + "../QIR/callables.cpp" + "../QIR/strings.cpp" + "../QIR/utils.cpp" + "tracer-qir.cpp" + "tracer.cpp" +) + +set(includes + "${public_includes}" + "${PROJECT_SOURCE_DIR}/lib/QIR" +) + + +#=============================================================================== +# Produce static lib for users to link directly to + +add_library(${component_name} STATIC ${source_files}) +target_include_directories(${component_name} PUBLIC ${includes}) +target_link_libraries(${component_name} ${CMAKE_DL_LIBS}) + +# The tracer cannot be included into qdk.dll so we don't build an object lib for it diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md new file mode 100644 index 00000000000..5a918b37eee --- /dev/null +++ b/src/QirRuntime/lib/Tracer/README.md @@ -0,0 +1,5 @@ +# Resource Tracer Design Document # + +The purpose of 
the Resource Tracer is to provide an efficient and flexible way to estimate the resources of a quantum program in QIR representation.
+
+Details are TBD.
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer-qir.cpp b/src/QirRuntime/lib/Tracer/tracer-qir.cpp
new file mode 100644
index 00000000000..a5df9526b31
--- /dev/null
+++ b/src/QirRuntime/lib/Tracer/tracer-qir.cpp
@@ -0,0 +1,52 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include <cstdint>
+
+#include "tracer.hpp"
+#include "qirTypes.hpp"
+
+extern "C"
+{
+    Result UseZero()
+    {
+        return reinterpret_cast<Result>(0);
+    }
+
+    Result UseOne()
+    {
+        return reinterpret_cast<Result>(1);
+    }
+
+    QUBIT* quantum__rt__qubit_allocate() // NOLINT
+    {
+        return nullptr;
+    }
+
+    void quantum__rt__qubit_release(QUBIT* qubit) // NOLINT
+    {
+    }
+
+    void quantum__rt__result_reference(RESULT* r) // NOLINT
+    {
+    }
+
+    void quantum__rt__result_unreference(RESULT* r) // NOLINT
+    {
+    }
+
+    bool quantum__rt__result_equal(RESULT* r1, RESULT* r2) // NOLINT
+    {
+        return false;
+    }
+
+    QirString* quantum__rt__result_to_string(RESULT* result) // NOLINT
+    {
+        return nullptr;
+    }
+
+    QirString* quantum__rt__qubit_to_string(QUBIT* qubit) // NOLINT
+    {
+        return nullptr;
+    }
+}
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp
new file mode 100644
index 00000000000..dad2a222fb2
--- /dev/null
+++ b/src/QirRuntime/lib/Tracer/tracer.cpp
@@ -0,0 +1,7 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include <memory>
+
+#include "tracer.hpp"
+
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
new file mode 100644
index 00000000000..bc0b79f958b
--- /dev/null
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -0,0 +1,16 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include "CoreTypes.hpp"
+
+// The tracer does _not_ implement the ISimulator interface by design, to avoid virtual calls and enable as many
+// compiler optimizations (inlining, etc.) as possible.
+class CTracer
+{
+    // Start with no reuse of qubits.
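+    // A minimal sketch of that contract (illustrative only, not the final policy):
+    //     Qubit AllocateQubit() { return reinterpret_cast<Qubit>(++lastQubitId); }
+    //     void ReleaseQubit(Qubit q) {} // ids are never recycled, so nothing to reclaim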
+ long lastQubitId = -1; + + public: + Qubit AllocateQubit(); + void ReleaseQubit(Qubit q); +}; \ No newline at end of file diff --git a/src/QirRuntime/test.py b/src/QirRuntime/test.py index e69749d2b2f..950569fbd4c 100644 --- a/src/QirRuntime/test.py +++ b/src/QirRuntime/test.py @@ -77,7 +77,8 @@ def log(message): "fullstate-simulator-tests", "qir-runtime-unittests", "qir-static-tests", - "qir-dynamic-tests" + "qir-dynamic-tests", + "qir-tracer-tests" ] for name in test_binaries: diff --git a/src/QirRuntime/test/CMakeLists.txt b/src/QirRuntime/test/CMakeLists.txt index 8d2fac8c254..e5541f78589 100644 --- a/src/QirRuntime/test/CMakeLists.txt +++ b/src/QirRuntime/test/CMakeLists.txt @@ -1,4 +1,5 @@ add_subdirectory(FullstateSimulator) add_subdirectory(QIR-dynamic) add_subdirectory(QIR-static) +add_subdirectory(QIR-tracer) add_subdirectory(unittests) diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt new file mode 100644 index 00000000000..6fc1f0f9d71 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt @@ -0,0 +1,77 @@ +# compile test ll files into a library, have to use custom commands for this +set(CLANG_ARGS "-c") +if (CMAKE_BUILD_TYPE STREQUAL "Debug") + set(CLANG_ARGS + "${CLANG_ARGS}" + "-O0" + "-D_DEBUG" + ) +endif() + +set(TEST_FILES + qir-test-trace +) + +foreach(file ${TEST_FILES}) + set(INFILE "${CMAKE_CURRENT_SOURCE_DIR}/${file}.ll") + set(OBJFILE "${CMAKE_CURRENT_BINARY_DIR}/${file}.obj") + + set(QIR_SINGLE_FILE_COMPILE_ACTION "qir_single_file_compile_action_${file}") + add_custom_command(OUTPUT ${QIR_SINGLE_FILE_COMPILE_ACTION} + COMMAND ${CMAKE_CXX_COMPILER} + ARGS ${CLANG_ARGS} ${INFILE} "-o" ${OBJFILE} + DEPENDS ${INFILE} + COMMENT "Compiling qir test file ${file}" + VERBATIM) + + set(QIR_SINGLE_FILE_COMPILE "qir_single_file_compile_${file}") + add_custom_target(${QIR_SINGLE_FILE_COMPILE} DEPENDS ${QIR_SINGLE_FILE_COMPILE_ACTION}) + + if (WIN32) + set(QIR_TESTS_LIB "${CMAKE_CURRENT_BINARY_DIR}/${file}.lib") + else() + set(QIR_TESTS_LIB "${CMAKE_CURRENT_BINARY_DIR}/libqir_${file}.a") + endif() + list(APPEND QIR_TESTS_LIBS ${QIR_TESTS_LIB}) + + add_custom_command(OUTPUT ${QIR_TESTS_LIB} + COMMAND ${CMAKE_AR} + ARGS "rc" ${QIR_TESTS_LIB} ${OBJFILE} + DEPENDS ${QIR_SINGLE_FILE_COMPILE} ${INFILE} + COMMENT "Adding QIR tests ${file} into a lib" + VERBATIM) + +endforeach() + +add_custom_target(qir_tracer_test_lib DEPENDS ${QIR_TESTS_LIBS}) + +#============================================================================== +# The executable target for QIR tests triggers the custom actions to compile ll files +# +add_executable(qir-tracer-tests + qir-tracer-driver.cpp) + +if (WIN32) + set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/qir-bridge-u.lib") +# set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/qir-bridge-qis-u.lib") +else() + set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libqir-bridge-u.a") +# set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libqir-bridge-qis-u.a") +endif() + +target_link_libraries(qir-tracer-tests PUBLIC + ${QIR_TESTS_LIBS} + ${QIR_BRIDGE_UTILITY_LIB} + tracer +) + +target_include_directories(qir-tracer-tests PUBLIC + "${test_includes}" + "${public_includes}" + "${PROJECT_SOURCE_DIR}/lib/QIR" +) +add_dependencies(qir-tracer-tests qir_tracer_test_lib) + +install(TARGETS qir-tracer-tests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") +add_unit_test(qir-tracer-tests) + diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-trace.ll 
b/src/QirRuntime/test/QIR-tracer/qir-test-trace.ll
new file mode 100644
index 00000000000..48883cc8040
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/qir-test-trace.ll
@@ -0,0 +1,15 @@
+; Copyright (c) Microsoft Corporation. All rights reserved.
+; Licensed under the MIT License.
+
+%Result = type opaque
+%Range = type { i64, i64, i64 }
+%TupleHeader = type { i32 }
+%Array = type opaque
+%Callable = type opaque
+%String = type opaque
+
+
+define i32 @To_Be_Generated() {
+entry:
+  ret i32 0
+}
diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-trace.qs b/src/QirRuntime/test/QIR-tracer/qir-test-trace.qs
new file mode 100644
index 00000000000..252e45ce50a
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/qir-test-trace.qs
@@ -0,0 +1,19 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Microsoft.Quantum.Testing.Tracer
+{
+    open Microsoft.Quantum.Intrinsic;
+
+    @EntryPoint()
+    operation something() : Unit
+    {
+        using (qs = Qubit[5])
+        {
+            X(qs[0]);
+            Rx(0.3, qs[1]);
+            CNOT(qs[1], qs[2]);
+
+        }
+    }
+}
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs b/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs
new file mode 100644
index 00000000000..c2d244a09f3
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs
@@ -0,0 +1,21 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+namespace Microsoft.Quantum.Core{
+
+    @Attribute()
+    newtype Attribute = Unit;
+
+    @Attribute()
+    newtype Inline = Unit;
+
+    @Attribute()
+    newtype EntryPoint = Unit;
+
+}
+
+namespace Microsoft.Quantum.Targeting {
+
+    @Attribute()
+    newtype TargetInstruction = String;
+}
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
new file mode 100644
index 00000000000..92c336f6b0e
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
@@ -0,0 +1,13 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include <iostream>
+
+#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
+#include "catch.hpp"
+
+extern "C" int To_Be_Generated(); // NOLINT
+TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]")
+{
+    REQUIRE(0 == To_Be_Generated());
+}
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs b/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
new file mode 100644
index 00000000000..70fb2da71c3
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
@@ -0,0 +1,135 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
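+
+// Test-only stand-in for a tracer target package: this file redefines the standard intrinsics,
+// delegating where possible to the Microsoft.Quantum.Instructions stubs (declared `body intrinsic`),
+// which the tracer runtime is expected to implement natively.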
+ +namespace Microsoft.Quantum.Instructions { + + operation S (qb : Qubit) : Unit { + body intrinsic; + } + + operation Rx (theta : Double, qb : Qubit) : Unit { + body intrinsic; + } + + operation Rz (theta : Double, qb : Qubit) : Unit { + body intrinsic; + } +} + +namespace Microsoft.Quantum.Intrinsic { + + open Microsoft.Quantum.Targeting; + open Microsoft.Quantum.Instructions as Phys; + + @Inline() + function PI() : Double + { + return 3.14159265357989; + } + + function IntAsDouble(i : Int) : Double { + body intrinsic; + } + + operation X(qb : Qubit) : Unit + is Adj { + body intrinsic; + adjoint self; + } + + operation Z(qb : Qubit) : Unit + is Adj { + body intrinsic; + adjoint self; + } + + operation H(qb : Qubit) : Unit + is Adj { + body intrinsic; + adjoint self; + } + + operation T(qb : Qubit) : Unit + is Adj { + body intrinsic; + } + + operation CNOT(control : Qubit, target : Qubit) : Unit + is Adj { + body intrinsic; + adjoint self; + } + + @TargetInstruction("mz") + operation M(qb : Qubit) : Result { + body intrinsic; + } + + operation Measure(bases : Pauli[], qubits : Qubit[]) : Result { + body intrinsic; + } + + operation MResetZ(qb : Qubit) : Result + { + let res = M(qb); + if (res == One) + { + X(qb); + } + return res; + } + + @Inline() + operation S(qb : Qubit) : Unit + is Adj { + body (...) + { + Phys.S(qb); + } + adjoint (...) + { + Phys.S(qb); + Z(qb); + } + } + + @Inline() + operation Rx(theta : Double, qb : Qubit) : Unit + is Adj { + body (...) + { + Phys.Rx(theta, qb); + } + adjoint (...) + { + Phys.Rx(-theta, qb); + } + } + + @Inline() + operation Rz(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) + { + Phys.Rz(theta, qb); + } + adjoint (...) + { + Phys.Rz(-theta, qb); + } + controlled (ctls, ...) + { + Phys.Rz(theta / 2.0, qb); + CNOT(ctls[0], qb); + Phys.Rz(-theta / 2.0, qb); + CNOT(ctls[0], qb); + } + controlled adjoint (ctls, ...) + { + Phys.Rz(-theta / 2.0, qb); + CNOT(ctls[0], qb); + Phys.Rz(theta / 2.0, qb); + CNOT(ctls[0], qb); + } + } +} diff --git a/src/QirRuntime/test/unittests/CMakeLists.txt b/src/QirRuntime/test/unittests/CMakeLists.txt index a6fb9a30662..d38b0f5745d 100644 --- a/src/QirRuntime/test/unittests/CMakeLists.txt +++ b/src/QirRuntime/test/unittests/CMakeLists.txt @@ -5,6 +5,7 @@ add_executable(qir-runtime-unittests driver.cpp QirRuntimeTests.cpp ToffoliTests.cpp + TracerTests.cpp ) target_link_libraries(qir-runtime-unittests PUBLIC diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp new file mode 100644 index 00000000000..0e71a85ef41 --- /dev/null +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -0,0 +1,9 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
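+
+// Placeholder suite for now; once CTracer exposes its counting API, a first test here might
+// look like this (sketch with illustrative names only):
+//     CTracer tracer;
+//     REQUIRE(tracer.AllocateQubit() != tracer.AllocateQubit()); // fresh id per allocation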
+
+#include "catch.hpp"
+
+TEST_CASE("To check that we are building the new components", "[tracer]")
+{
+    REQUIRE(true);
+}
\ No newline at end of file
From c60b86fcf8ee567b2344eb9caec11e4361235798 Mon Sep 17 00:00:00 2001
From: Irina Yatsenko
Date: Tue, 22 Dec 2020 19:45:40 -0800
Subject: [PATCH 02/27] Creating file structure for trc bridge

---
 src/QirRuntime/lib/Tracer/CMakeLists.txt     |   3 +-
 src/QirRuntime/lib/Tracer/README.md          |   2 +-
 src/QirRuntime/lib/Tracer/bridge-trc.ll      | 139 ++++++++++++++++++
 .../Tracer/{tracer-qir.cpp => tracer-rt.cpp} |   0
 src/QirRuntime/lib/Tracer/tracer-trc.cpp     | 106 +++++++++++++
 src/QirRuntime/lib/Tracer/tracer.cpp         |   4 +
 src/QirRuntime/lib/Tracer/tracer.hpp         |  12 +-
 7 files changed, 263 insertions(+), 3 deletions(-)
 create mode 100644 src/QirRuntime/lib/Tracer/bridge-trc.ll
 rename src/QirRuntime/lib/Tracer/{tracer-qir.cpp => tracer-rt.cpp} (100%)
 create mode 100644 src/QirRuntime/lib/Tracer/tracer-trc.cpp

diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt
index 7c9aaa08fcc..3715343a72f 100644
--- a/src/QirRuntime/lib/Tracer/CMakeLists.txt
+++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt
@@ -7,7 +7,8 @@ set(source_files
     "../QIR/callables.cpp"
     "../QIR/strings.cpp"
     "../QIR/utils.cpp"
-    "tracer-qir.cpp"
+    "tracer-rt.cpp"
+    "tracer-trc.cpp"
     "tracer.cpp"
 )
 
diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md
index 5a918b37eee..55a15f58e56 100644
--- a/src/QirRuntime/lib/Tracer/README.md
+++ b/src/QirRuntime/lib/Tracer/README.md
@@ -2,4 +2,4 @@
 
 The purpose of the Resource Tracer is to provide an efficient and flexible way to estimate the resources of a quantum program in QIR representation.
 
-Details are TBD.
\ No newline at end of file
+Details are TBD.
diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/bridge-trc.ll
new file mode 100644
index 00000000000..abaf2210351
--- /dev/null
+++ b/src/QirRuntime/lib/Tracer/bridge-trc.ll
@@ -0,0 +1,139 @@
+; Copyright (c) Microsoft Corporation. All rights reserved.
+; Licensed under the MIT License.
+
+;=======================================================================================================================
+; QIR types
+;
+%Array = type opaque
+%Callable = type opaque
+%Qubit = type opaque
+%Range = type { i64, i64, i64 }
+%Result = type opaque
+%String = type opaque
+%TupleHeader = type { i32 }
+%Pauli = type {i2}
+
+;=======================================================================================================================
+; Native types
+; NB: there is no overloading at IR level, so a call/invoke will be made even
+; if the definition of the function mismatches the declaration of the arguments.
+; It means we could declare here the bridge's C-functions using QIR types
+; and avoid bitcasts. However, it seems prudent to be more explicit about
+; what's going on and declare the true signatures, as generated by Clang.
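+; For example, each wrapper below simply bitcasts the QIR pointer type to its native twin and
+; forwards the call:
+;     %q = bitcast %Qubit* %.q to %class.QUBIT*
+;     call void @quantum__qis__x(%class.QUBIT* %q)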
+; +%class.QUBIT = type opaque +%class.RESULT = type opaque +%struct.QirArray = type opaque +%struct.QirCallable = type opaque +%struct.QirRange = type { i64, i64, i64 } +%struct.QirString = type opaque +%struct.QirTupleHeader = type { i32 } + +;=============================================================================== +; declarations of the native methods this bridge delegates to +; + +declare void @quantum__qis__z(%class.QUBIT*) + +declare void @quantum__qis__crx(%struct.QirArray*, double, %class.QUBIT*) +declare void @quantum__qis__crz(%struct.QirArray*, double, %class.QUBIT*) + +;=============================================================================== +; quantum__trc namespace implementations +; +define double @__quantum__qis__intAsDouble(i64 %i) +{ + %d = call double @quantum__qis__intAsDouble(i64 %i) + ret double %d +} + +define void @__quantum__qis__cnot__(%Qubit* %.qc, %Qubit* %.qt) { + %qc = bitcast %Qubit* %.qc to %class.QUBIT* + %qt = bitcast %Qubit* %.qt to %class.QUBIT* + call void @quantum__qis__cnot(%class.QUBIT* %qc, %class.QUBIT* %qt) + ret void +} + +define void @__quantum__qis__h__(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__h(%class.QUBIT* %q) + ret void +} + +define %Result* @__quantum__qis__measure(%Array* %.paulis, %Array* %.qubits) { + %paulis = bitcast %Array* %.paulis to %struct.QirArray* + %qubits = bitcast %Array* %.qubits to %struct.QirArray* + %r = call %class.RESULT* @quantum__qis__measure(%struct.QirArray* %paulis, %struct.QirArray* %qubits) + %.r = bitcast %class.RESULT* %r to %Result* + ret %Result* %.r +} + +define %Result* @__quantum__qis__mz(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + %r = call %class.RESULT* @quantum__qis__mz(%class.QUBIT* %q) + %.r = bitcast %class.RESULT* %r to %Result* + ret %Result* %.r +} + +define void @__quantum__qis__s__(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__s(%class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__t__(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__t(%class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__rx__(double %.theta, %Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__rx(double %.theta, %class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__ry__(double %.theta, %Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__ry(double %.theta, %class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__rz__(double %.theta, %Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__rz(double %.theta, %class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__x__(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__x(%class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__y__(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__y(%class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__z__(%Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__z(%class.QUBIT* %q) + ret void +} + + +define void @__quantum__qis__crx__(%Array* %.ctls, double %.theta, %Qubit* %.q) { + %q = bitcast %Qubit* %.q to %class.QUBIT* + %ctls = bitcast %Array* %.ctls to %struct.QirArray* + call void @quantum__qis__crx(%struct.QirArray* %ctls, double %.theta, %class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__crz__(%Array* %.ctls, double 
%.theta, %Qubit* %.q) {
+    %q = bitcast %Qubit* %.q to %class.QUBIT*
+    %ctls = bitcast %Array* %.ctls to %struct.QirArray*
+    call void @quantum__qis__crz(%struct.QirArray* %ctls, double %.theta, %class.QUBIT* %q)
+    ret void
+}
diff --git a/src/QirRuntime/lib/Tracer/tracer-qir.cpp b/src/QirRuntime/lib/Tracer/tracer-rt.cpp
similarity index 100%
rename from src/QirRuntime/lib/Tracer/tracer-qir.cpp
rename to src/QirRuntime/lib/Tracer/tracer-rt.cpp
diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
new file mode 100644
index 00000000000..60b2541757e
--- /dev/null
+++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
@@ -0,0 +1,106 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include <cstdint>
+
+#include "CoreTypes.hpp"
+#include "qirTypes.hpp"
+#include "tracer.hpp"
+
+extern thread_local std::shared_ptr<CTracer> tracer;
+
+extern "C"
+{
+    void quantum__trc__on_operation_start(int64_t id) // NOLINT
+    {
+    }
+    void quantum__trc__on_operation_end(int64_t id) // NOLINT
+    {
+    }
+
+    void quantum__trc__hadamard(Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__swap(Qubit q1, Qubit q2) // NOLINT
+    {
+    }
+
+    void quantum__trc__single_qubit_op_0(int32_t duration, Qubit target) // NOLINT
+    {
+        tracer->TraceSingleQubitOp<0>(duration, target); // NOLINT
+    }
+    void quantum__trc__single_qubit_op_1(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_2(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_3(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_4(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_5(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_6(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_7(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_8(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_9(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_10(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_11(int32_t duration, Qubit target) // NOLINT
+    {
+    }
+
+    void quantum__trc__single_qubit_op_ctl_0(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_1(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_2(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_3(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_4(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_5(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_6(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_7(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_8(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_9(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_10(int32_t duration, QirArray*
controls, Qubit* target) // NOLINT
+    {
+    }
+    void quantum__trc__single_qubit_op_ctl_11(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+    }
+
+    void quantum__trc__global_barrier(const char* name) // NOLINT
+    {
+    }
+}
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp
index dad2a222fb2..e620e0e7a49 100644
--- a/src/QirRuntime/lib/Tracer/tracer.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer.cpp
@@ -5,3 +5,7 @@
 
 #include "tracer.hpp"
 
+void InitializeTracer()
+{
+    tracer = std::make_shared<CTracer>();
+}
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index bc0b79f958b..496483f0fbd 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -1,6 +1,8 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 
+#include <memory>
+
 #include "CoreTypes.hpp"
 
 // The tracer does _not_ implement the ISimulator interface by design, to avoid virtual calls and enable as many
 // compiler optimizations (inlining, etc.) as possible.
@@ -13,4 +15,12 @@ class CTracer
 
   public:
     Qubit AllocateQubit();
     void ReleaseQubit(Qubit q);
-};
\ No newline at end of file
+
+    template <int OP_ID> void TraceSingleQubitOp(int32_t duration, Qubit target)
+    {
+        // figure out the layering, etc.
+    }
+};
+
+thread_local std::shared_ptr<CTracer> tracer = nullptr;
+void InitializeTracer();
\ No newline at end of file
From 4979b52e6db0f129ec9133c60f620bc2da38ff30 Mon Sep 17 00:00:00 2001
From: Irina Yatsenko
Date: Tue, 5 Jan 2021 12:05:43 -0800
Subject: [PATCH 03/27] Bucketing target.qs and the corresponding bridge

---
 src/QirRuntime/lib/Tracer/CMakeLists.txt      |  21 +-
 src/QirRuntime/lib/Tracer/bridge-trc.ll       | 119 ++-------
 src/QirRuntime/lib/Tracer/tracer-rt.cpp       |  52 ----
 src/QirRuntime/lib/Tracer/tracer-trc.cpp      |  88 ++----
 src/QirRuntime/lib/Tracer/tracer.cpp          |  19 +-
 src/QirRuntime/lib/Tracer/tracer.hpp          |  96 ++++++-
 src/QirRuntime/test/QIR-tracer/CMakeLists.txt |  54 +---
 .../test/QIR-tracer/qir-test-intrinsics.ll    | 184 +++++++++++++
 .../test/QIR-tracer/qir-test-intrinsics.qs    |  70 +++++
 .../test/QIR-tracer/qir-test-trace.ll         |  15 --
 .../test/QIR-tracer/qir-test-trace.qs         |  19 --
 .../test/QIR-tracer/qir-tracer-core.qs        |  11 +-
 .../test/QIR-tracer/qir-tracer-driver.cpp     |  12 +-
 .../test/QIR-tracer/qir-tracer-target.qs      | 252 ++++++++++++------
 14 files changed, 590 insertions(+), 422 deletions(-)
 delete mode 100644 src/QirRuntime/lib/Tracer/tracer-rt.cpp
 create mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll
 create mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs
 delete mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-trace.ll
 delete mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-trace.qs

diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt
index 3715343a72f..1426c9cc1aa 100644
--- a/src/QirRuntime/lib/Tracer/CMakeLists.txt
+++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt
@@ -1,13 +1,11 @@
 set(component_name "tracer")
+set(bridge_lib "bridge_trc_u")
+
+compile_from_qir(bridge-trc ${bridge_lib})
 
 # the tracer implements its own management of qubits/results but reuses the rest of the qir-runtime
 # TODO: consider splitting qir-rt into two libs
 set(source_files
-    "../QIR/arrays.cpp"
-    "../QIR/callables.cpp"
-    "../QIR/strings.cpp"
-    "../QIR/utils.cpp"
-    "tracer-rt.cpp"
     "tracer-trc.cpp"
     "tracer.cpp"
 )
 
 set(includes
     "${public_includes}"
     "${PROJECT_SOURCE_DIR}/lib/QIR"
 )
 
-
-#=============================================================================== -# Produce static lib for users to link directly to - add_library(${component_name} STATIC ${source_files}) + target_include_directories(${component_name} PUBLIC ${includes}) -target_link_libraries(${component_name} ${CMAKE_DL_LIBS}) -# The tracer cannot be included into qdk.dll so we don't build an object lib for it +MESSAGE(INFO "*** ${QIR_UTILITY_LIB}") +target_link_libraries(${component_name} + ${QIR_UTILITY_LIB} # absolute path to the bridge library, set by compile_from_ir +) + +add_dependencies(${component_name} ${bridge_lib}) diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/bridge-trc.ll index abaf2210351..725a701979b 100644 --- a/src/QirRuntime/lib/Tracer/bridge-trc.ll +++ b/src/QirRuntime/lib/Tracer/bridge-trc.ll @@ -5,135 +5,56 @@ ; QIR types ; %Array = type opaque -%Callable = type opaque %Qubit = type opaque -%Range = type { i64, i64, i64 } %Result = type opaque -%String = type opaque -%TupleHeader = type { i32 } -%Pauli = type {i2} + ;======================================================================================================================= ; Native types -; NB: there is no overloading at IR level, so a call/invoke will be made even -; if the definition of the function mismatches the declaration of the arguments. -; It means we could declare here the bridge's C-functions using QIR types -; and avoid bitcasts. However, it seems prudent to be more explicit about -; what's going on and declare the true signatures, as generated by Clang. ; %class.QUBIT = type opaque %class.RESULT = type opaque %struct.QirArray = type opaque -%struct.QirCallable = type opaque -%struct.QirRange = type { i64, i64, i64 } -%struct.QirString = type opaque -%struct.QirTupleHeader = type { i32 } + ;=============================================================================== ; declarations of the native methods this bridge delegates to ; -declare void @quantum__qis__z(%class.QUBIT*) - -declare void @quantum__qis__crx(%struct.QirArray*, double, %class.QUBIT*) -declare void @quantum__qis__crz(%struct.QirArray*, double, %class.QUBIT*) +declare void @quantum__trc__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT*) +declare void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*) +declare void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*) +declare void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*) ;=============================================================================== ; quantum__trc namespace implementations ; -define double @__quantum__qis__intAsDouble(i64 %i) +define void @__quantum__trc__single_qubit_op(i32 %id, i32 %duration, %Qubit* %.q) { - %d = call double @quantum__qis__intAsDouble(i64 %i) - ret double %d -} - -define void @__quantum__qis__cnot__(%Qubit* %.qc, %Qubit* %.qt) { - %qc = bitcast %Qubit* %.qc to %class.QUBIT* - %qt = bitcast %Qubit* %.qt to %class.QUBIT* - call void @quantum__qis__cnot(%class.QUBIT* %qc, %class.QUBIT* %qt) - ret void -} - -define void @__quantum__qis__h__(%Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__h(%class.QUBIT* %q) - ret void -} - -define %Result* @__quantum__qis__measure(%Array* %.paulis, %Array* %.qubits) { - %paulis = bitcast %Array* %.paulis to %struct.QirArray* - %qubits = bitcast %Array* %.qubits to %struct.QirArray* - %r = call %class.RESULT* 
@quantum__qis__measure(%struct.QirArray* %paulis, %struct.QirArray* %qubits) - %.r = bitcast %class.RESULT* %r to %Result* - ret %Result* %.r -} - -define %Result* @__quantum__qis__mz(%Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - %r = call %class.RESULT* @quantum__qis__mz(%class.QUBIT* %q) - %.r = bitcast %class.RESULT* %r to %Result* - ret %Result* %.r -} - -define void @__quantum__qis__s__(%Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__s(%class.QUBIT* %q) - ret void -} - -define void @__quantum__qis__t__(%Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__t(%class.QUBIT* %q) - ret void -} - -define void @__quantum__qis__rx__(double %.theta, %Qubit* %.q) { %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__rx(double %.theta, %class.QUBIT* %q) + call void @quantum__trc__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT* %q) ret void } -define void @__quantum__qis__ry__(double %.theta, %Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__ry(double %.theta, %class.QUBIT* %q) - ret void -} - -define void @__quantum__qis__rz__(double %.theta, %Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__rz(double %.theta, %class.QUBIT* %q) - ret void -} - -define void @__quantum__qis__x__(%Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__x(%class.QUBIT* %q) - ret void -} - -define void @__quantum__qis__y__(%Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__y(%class.QUBIT* %q) - ret void -} - -define void @__quantum__qis__z__(%Qubit* %.q) { +define void @__quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Qubit* %.q) +{ %q = bitcast %Qubit* %.q to %class.QUBIT* - call void @quantum__qis__z(%class.QUBIT* %q) + %ctls = bitcast %Array* %.ctls to %struct.QirArray* + call void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %class.QUBIT* %q) ret void } - -define void @__quantum__qis__crx__(%Array* %.ctls, double %.theta, %Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* - %ctls = bitcast %Array* %.ctls to %struct.QirArray* - call void @quantum__qis__crx(%struct.QirArray* %ctls, double %.theta, %class.QUBIT* %q) +define void @__quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %Array* %.qs) +{ + %qs = bitcast %Array* %.qs to %struct.QirArray* + call void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray* %qs) ret void } -define void @__quantum__qis__crz__(%Array* %.ctls, double %.theta, %Qubit* %.q) { - %q = bitcast %Qubit* %.q to %class.QUBIT* +define void @__quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Array* %.qs) +{ %ctls = bitcast %Array* %.ctls to %struct.QirArray* - call void @quantum__qis__crz(%struct.QirArray* %ctls, double %.theta, %class.QUBIT* %q) + %qs = bitcast %Array* %.qs to %struct.QirArray* + call void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %struct.QirArray* %qs) ret void } diff --git a/src/QirRuntime/lib/Tracer/tracer-rt.cpp b/src/QirRuntime/lib/Tracer/tracer-rt.cpp deleted file mode 100644 index a5df9526b31..00000000000 --- a/src/QirRuntime/lib/Tracer/tracer-rt.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-
-#include <cstdint>
-
-#include "tracer.hpp"
-#include "qirTypes.hpp"
-
-extern "C"
-{
-    Result UseZero()
-    {
-        return reinterpret_cast<Result>(0);
-    }
-
-    Result UseOne()
-    {
-        return reinterpret_cast<Result>(1);
-    }
-
-    QUBIT* quantum__rt__qubit_allocate() // NOLINT
-    {
-        return nullptr;
-    }
-
-    void quantum__rt__qubit_release(QUBIT* qubit) // NOLINT
-    {
-    }
-
-    void quantum__rt__result_reference(RESULT* r) // NOLINT
-    {
-    }
-
-    void quantum__rt__result_unreference(RESULT* r) // NOLINT
-    {
-    }
-
-    bool quantum__rt__result_equal(RESULT* r1, RESULT* r2) // NOLINT
-    {
-        return false;
-    }
-
-    QirString* quantum__rt__result_to_string(RESULT* result) // NOLINT
-    {
-        return nullptr;
-    }
-
-    QirString* quantum__rt__qubit_to_string(QUBIT* qubit) // NOLINT
-    {
-        return nullptr;
-    }
-}
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
index 60b2541757e..9df5f97d85c 100644
--- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
@@ -7,8 +7,15 @@
 #include "qirTypes.hpp"
 #include "tracer.hpp"
 
-extern thread_local std::shared_ptr<CTracer> tracer;
+namespace Microsoft
+{
+namespace Quantum
+{
+    extern thread_local std::shared_ptr<CTracer> tracer;
+}
+} // namespace Microsoft
 
+using namespace Microsoft::Quantum;
 extern "C"
 {
     void quantum__trc__on_operation_start(int64_t id) // NOLINT
@@ -18,89 +25,28 @@ extern "C"
     {
     }
 
-    void quantum__trc__hadamard(Qubit target) // NOLINT
-    {
-    }
     void quantum__trc__swap(Qubit q1, Qubit q2) // NOLINT
     {
     }
 
-    void quantum__trc__single_qubit_op_0(int32_t duration, Qubit target) // NOLINT
-    {
-        tracer->TraceSingleQubitOp<0>(duration, target); // NOLINT
-    }
-    void quantum__trc__single_qubit_op_1(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_2(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_3(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_4(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_5(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_6(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_7(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_8(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_9(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_10(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_11(int32_t duration, Qubit target) // NOLINT
-    {
-    }
-
-    void quantum__trc__single_qubit_op_ctl_0(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_1(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_2(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_3(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_4(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_5(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_6(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_7(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_8(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_9(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_10(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-    void quantum__trc__single_qubit_op_ctl_11(int32_t duration, QirArray* controls, Qubit* target) // NOLINT
-    {
-    }
-
     void quantum__trc__global_barrier(const char* name) // NOLINT
     {
     }
+
+    void quantum__trc__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT
+    {
+        tracer->TraceSingleQubitOp(id, duration, reinterpret_cast<TracedQubit*>(target)); // NOLINT
+    }
+    void quantum__trc__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT
+    {
+        // TBD
+    }
+    void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, Qubit* target) // NOLINT
+    {
+        // TBD
+    }
+    void quantum__trc__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, QirArray* targets) // NOLINT
+    {
+        // TBD
+    }
 }
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp
index e620e0e7a49..14b8803b3b6 100644
--- a/src/QirRuntime/lib/Tracer/tracer.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer.cpp
@@ -5,7 +5,20 @@
 
 #include "tracer.hpp"
 
-void InitializeTracer()
+namespace Microsoft
 {
-    tracer = std::make_shared<CTracer>();
-}
\ No newline at end of file
+namespace Quantum
+{
+    thread_local std::shared_ptr<CTracer> tracer = nullptr;
+    std::shared_ptr<CTracer> CreateTracer()
+    {
+        tracer = std::make_shared<CTracer>();
+        return tracer;
+    }
+
+    std::unique_ptr<ISimulator> CreateFullstateSimulator()
+    {
+        throw std::logic_error("Tracer should not instantiate full state simulator");
+    }
+} // namespace Quantum
+} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index 496483f0fbd..2d914b5292d 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -4,23 +4,91 @@
 #include <memory>
 
 #include "CoreTypes.hpp"
+#include "QuantumApi_I.hpp"
 
-// The tracer does _not_ implement the ISimulator interface by design, to avoid virtual calls and enable as many
-// compiler optimizations (inlining, etc.) as possible.
-class CTracer
-{
-    // Start with no reuse of qubits.
-    long lastQubitId = -1;
-
-  public:
-    Qubit AllocateQubit();
-    void ReleaseQubit(Qubit q);
-
-    template <int OP_ID> void TraceSingleQubitOp(int32_t duration, Qubit target)
-    {
-        // figure out the layering, etc.
-    }
-};
-
-thread_local std::shared_ptr<CTracer> tracer = nullptr;
-void InitializeTracer();
\ No newline at end of file
+namespace Microsoft
+{
+namespace Quantum
+{
+    /*======================================================================================================================
+      TracedQubit
+    ======================================================================================================================*/
+    struct TracedQubit
+    {
+        static const long INVALID = -1;
+
+        long id = INVALID;
+
+        // Last layer the qubit was used in; `INVALID` means the qubit hasn't been used yet in any operations of
+        // non-zero duration.
+        int layer = INVALID;
+    };
+
+    /*======================================================================================================================
+      The tracer implements resource estimation. See the readme in this folder for details.
+    ======================================================================================================================*/
+    class CTracer : public ISimulator
+    {
+        // Start with no reuse of qubits.
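+        // (The id doubles as the opaque Qubit value handed back through the runtime; see
+        // AllocateQubit below. Supporting reuse later would take a free-list fed by
+        // ReleaseQubit, e.g. a std::vector<long> of returned ids. Illustrative notes only.)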
+        long lastQubitId = -1;
+
+      public:
+        IQuantumGateSet* AsQuantumGateSet() override
+        {
+            return nullptr;
+        }
+        IDiagnostics* AsDiagnostics() override
+        {
+            return nullptr;
+        }
+        Qubit AllocateQubit() override
+        {
+            return reinterpret_cast<Qubit>(++lastQubitId);
+        }
+        void ReleaseQubit(Qubit qubit) override
+        {
+            // nothing for now
+        }
+        std::string QubitToString(Qubit qubit) override
+        {
+            throw std::logic_error("not_implemented");
+        }
+        Result M(Qubit target) override
+        {
+            throw std::logic_error("not_implemented");
+        }
+        Result Measure(long numBases, PauliId bases[], long numTargets, Qubit targets[]) override
+        {
+            throw std::logic_error("not_implemented");
+        }
+        void ReleaseResult(Result result) override
+        {
+            throw std::logic_error("not_implemented");
+        }
+        bool AreEqualResults(Result r1, Result r2) override
+        {
+            throw std::logic_error("not_implemented");
+        }
+        ResultValue GetResultValue(Result result) override
+        {
+            throw std::logic_error("not_implemented");
+        }
+        Result UseZero() override
+        {
+            return reinterpret_cast<Result>(0);
+        }
+        Result UseOne() override
+        {
+            return reinterpret_cast<Result>(1);
+        }
+
+        void TraceSingleQubitOp(int32_t id, int32_t duration, TracedQubit* target)
+        {
+            // figure out the layering, etc.
+        }
+    };
+
+    std::shared_ptr<CTracer> CreateTracer();
+
+} // namespace Quantum
+} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt
index 6fc1f0f9d71..852dc3374b6 100644
--- a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt
+++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt
@@ -1,77 +1,33 @@
-# compile test ll files into a library, have to use custom commands for this
-set(CLANG_ARGS "-c")
-if (CMAKE_BUILD_TYPE STREQUAL "Debug")
-    set(CLANG_ARGS
-        "${CLANG_ARGS}"
-        "-O0"
-        "-D_DEBUG"
-    )
-endif()
-
 set(TEST_FILES
-    qir-test-trace
+    qir-test-intrinsics
 )
 
 foreach(file ${TEST_FILES})
-    set(INFILE "${CMAKE_CURRENT_SOURCE_DIR}/${file}.ll")
-    set(OBJFILE "${CMAKE_CURRENT_BINARY_DIR}/${file}.obj")
-
-    set(QIR_SINGLE_FILE_COMPILE_ACTION "qir_single_file_compile_action_${file}")
-    add_custom_command(OUTPUT ${QIR_SINGLE_FILE_COMPILE_ACTION}
-        COMMAND ${CMAKE_CXX_COMPILER}
-        ARGS ${CLANG_ARGS} ${INFILE} "-o" ${OBJFILE}
-        DEPENDS ${INFILE}
-        COMMENT "Compiling qir test file ${file}"
-        VERBATIM)
-
-    set(QIR_SINGLE_FILE_COMPILE "qir_single_file_compile_${file}")
-    add_custom_target(${QIR_SINGLE_FILE_COMPILE} DEPENDS ${QIR_SINGLE_FILE_COMPILE_ACTION})
-
-    if (WIN32)
-        set(QIR_TESTS_LIB "${CMAKE_CURRENT_BINARY_DIR}/${file}.lib")
-    else()
-        set(QIR_TESTS_LIB "${CMAKE_CURRENT_BINARY_DIR}/libqir_${file}.a")
-    endif()
-    list(APPEND QIR_TESTS_LIBS ${QIR_TESTS_LIB})
-
-    add_custom_command(OUTPUT ${QIR_TESTS_LIB}
-        COMMAND ${CMAKE_AR}
-        ARGS "rc" ${QIR_TESTS_LIB} ${OBJFILE}
-        DEPENDS ${QIR_SINGLE_FILE_COMPILE} ${INFILE}
-        COMMENT "Adding QIR tests ${file} into a lib"
-        VERBATIM)
-
+    compile_from_qir(${file} "") # don't create a target per file
+    list(APPEND QIR_TESTS_LIBS ${QIR_UTILITY_LIB})
 endforeach()
 
 add_custom_target(qir_tracer_test_lib DEPENDS ${QIR_TESTS_LIBS})
 
 #==============================================================================
 # The executable target for QIR tests triggers the custom actions to compile ll files
 #
 add_executable(qir-tracer-tests
     qir-tracer-driver.cpp)
 
-if (WIN32)
-    set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/qir-bridge-u.lib")
-#    set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/qir-bridge-qis-u.lib")
-else()
-    
set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libqir-bridge-u.a") -# set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libqir-bridge-qis-u.a") -endif() - target_link_libraries(qir-tracer-tests PUBLIC ${QIR_TESTS_LIBS} ${QIR_BRIDGE_UTILITY_LIB} tracer + qir-rt-support ) target_include_directories(qir-tracer-tests PUBLIC "${test_includes}" "${public_includes}" - "${PROJECT_SOURCE_DIR}/lib/QIR" + "${PROJECT_SOURCE_DIR}/lib/Tracer" ) add_dependencies(qir-tracer-tests qir_tracer_test_lib) install(TARGETS qir-tracer-tests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") add_unit_test(qir-tracer-tests) - diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll new file mode 100644 index 00000000000..fa5023fb43f --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll @@ -0,0 +1,184 @@ + +%Result = type opaque +%Range = type { i64, i64, i64 } +%Qubit = type opaque +%Array = type opaque +%Tuple = type opaque + +@ResultZero = external global %Result* +@ResultOne = external global %Result* +@PauliI = constant i2 0 +@PauliX = constant i2 1 +@PauliY = constant i2 -1 +@PauliZ = constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } + +@Microsoft_Quantum_Testing_Tracer_AllIntrinsics = alias i1 (), i1 ()* @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body + +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +declare void @__quantum__trc__single_qubit_op(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + %0 = call i64 @__quantum__rt__array_get_length(%Array* %ctls, i32 0) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + ret void +} + +declare i64 @__quantum__rt__array_get_length(%Array*, i32) + +declare void @__quantum__trc__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + %0 = call i64 @__quantum__rt__array_get_length(%Array* %ctls, i32 0) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + ret void +} + +define i1 @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body() #0 { +entry: + %res = alloca i1 + store i1 false, i1* %res + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %.qb = load %Qubit*, %Qubit** %1 + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %.qb) + %2 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %.qb1 = load %Qubit*, %Qubit** %3 + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %.qb1) + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %c, %Qubit** %6 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %11 = bitcast %Tuple* %10 to { %Array*, %Qubit* }* + %12 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 + %13 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 1 + store %Array* %4, %Array** %12 + call void @__quantum__rt__array_reference(%Array* %4) + store %Qubit* %9, %Qubit** %13 + %14 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 + %.ctls = load %Array*, %Array** %14 + %15 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 1 + %.qb2 = load %Qubit*, %Qubit** %15 + %16 = call i64 @__quantum__rt__array_get_length(%Array* %.ctls, i32 0) + %17 = icmp eq i64 %16, 1 + br i1 %17, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %.ctls, %Qubit* %.qb2) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %.ctls, %Qubit* %.qb2) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__qubit_release(%Qubit* %c) +; call void @__quantum__rt__array_unreference(%Array* %4) + %18 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 + %19 = load %Array*, %Array** %18 +; call void @__quantum__rt__array_unreference(%Array* %19) + %20 = bitcast { %Array*, %Qubit* }* %11 to %Tuple* +; call void @__quantum__rt__tuple_unreference(%Tuple* %20) + %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22 + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %25 = bitcast %Tuple* %24 to { %Array*, %Qubit* }* + %26 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 0 + %27 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 1 + store %Array* %cc, %Array** %26 + call void @__quantum__rt__array_reference(%Array* %cc) + store %Qubit* %23, %Qubit** %27 + %28 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 0 + %.ctls3 = load %Array*, %Array** %28 + %29 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 1 + %.qb4 = load %Qubit*, %Qubit** %29 + %30 = call i64 @__quantum__rt__array_get_length(%Array* %.ctls3, i32 0) + %31 = icmp eq i64 %30, 1 + br i1 %31, label %then0__2, label %else__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %.ctls3, %Qubit* %.qb4) + br label %continue__2 + +else__2: ; preds = %continue__1 + call void @__quantum__trc__single_qubit_op_ctl(i64 
2, i64 1, %Array* %.ctls3, %Qubit* %.qb4) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__qubit_release_array(%Array* %cc) +; call void @__quantum__rt__array_unreference(%Array* %cc) + %32 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 0 + %33 = load %Array*, %Array** %32 +; call void @__quantum__rt__array_unreference(%Array* %33) + %34 = bitcast { %Array*, %Qubit* }* %25 to %Tuple* +; call void @__quantum__rt__tuple_unreference(%Tuple* %34) +; call void @__quantum__rt__qubit_release_array(%Array* %qs) +; call void @__quantum__rt__array_unreference(%Array* %qs) + ret i1 true +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__array_reference(%Array*) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__array_unreference(%Array*) + +declare void @__quantum__rt__tuple_unreference(%Tuple*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs new file mode 100644 index 00000000000..9491ea765bf --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.Tracer +{ + open Microsoft.Quantum.Intrinsic; + + @EntryPoint() + operation AllIntrinsics() : Bool + { + mutable res = false; + using (qs = Qubit[3]) + { + X(qs[0]); + // Y(qs[0]); + // Z(qs[1]); + // H(qs[1]); + // CNOT(qs[1], qs[2]); + // Rx(0.3, qs[0]); + // Ry(0.4, qs[1]); + // Rz(0.5, qs[2]); + // //SWAP(qs[0], qs[2]); + // S(qs[1]); + // T(qs[2]); + + Adjoint X(qs[0]); + // Adjoint Y(qs[0]); + // Adjoint Z(qs[1]); + // Adjoint H(qs[1]); + // Adjoint CNOT(qs[1], qs[2]); + // Adjoint Rx(0.3, qs[0]); + // Adjoint Ry(0.4, qs[1]); + // Adjoint Rz(0.5, qs[2]); + // //Adjoint SWAP(qs[0], qs[2]); + // Adjoint S(qs[1]); + // Adjoint T(qs[2]); + + using (c = Qubit()) + { + Controlled X([c], (qs[0])); + // Controlled Y([c], (qs[0])); + // Controlled Z([c], (qs[1])); + // Controlled H([c], (qs[1])); + // Controlled Rx([c], (0.3, qs[0])); + // Controlled Ry([c], (0.4, qs[1])); + // Controlled Rz([c], (0.5, qs[2])); + // //Controlled SWAP([c], (qs[0], qs[2])); + // Controlled S([c], (qs[1])); + // Controlled T([c], (qs[2])); + } + + using (cc = Qubit[2]) + { + Controlled X(cc, (qs[0])); + // Controlled Y(cc, (qs[0])); + // Controlled Z(cc, (qs[1])); + // Controlled H(cc, (qs[1])); + // Controlled Rx(cc, (0.3, qs[0])); + // Controlled Ry(cc, (0.4, qs[1])); + // Controlled Rz(cc, (0.5, qs[2])); + // //Controlled SWAP(cc, (qs[0], qs[2])); + // Controlled S(cc, (qs[1])); + // Controlled T(cc, (qs[2])); + } + + //set res = (M(qs[0]) == Measure([PauliY, PauliX], [qs[1], qs[2]])); + } + return true; + } +} diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-trace.ll b/src/QirRuntime/test/QIR-tracer/qir-test-trace.ll deleted file mode 100644 index 48883cc8040..00000000000 --- a/src/QirRuntime/test/QIR-tracer/qir-test-trace.ll +++ /dev/null @@ -1,15 +0,0 @@ -; Copyright (c) Microsoft Corporation. All rights reserved. 
-; Licensed under the MIT License.
-
-%Result = type opaque
-%Range = type { i64, i64, i64 }
-%TupleHeader = type { i32 }
-%Array = type opaque
-%Callable = type opaque
-%String = type opaque
-
-
-define i32 @To_Be_Generated() {
-entry:
-  ret i32 0
-}
diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-trace.qs b/src/QirRuntime/test/QIR-tracer/qir-test-trace.qs
deleted file mode 100644
index 252e45ce50a..00000000000
--- a/src/QirRuntime/test/QIR-tracer/qir-test-trace.qs
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-namespace Microsoft.Quantum.Testing.Tracer
-{
-    open Microsoft.Quantum.Intrinsic;
-
-    @EntryPoint()
-    operation something() : Unit
-    {
-        using (qs = Qubit[5])
-        {
-            X(qs[0]);
-            Rx(0.3, qs[1]);
-            CNOT(qs[1], qs[2]);
-
-        }
-    }
-}
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs b/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs
index c2d244a09f3..de169cab594 100644
--- a/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs
@@ -12,10 +12,19 @@ namespace Microsoft.Quantum.Core{
 
     @Attribute()
     newtype EntryPoint = Unit;
+
+    function Length<'T> (array : 'T[]) : Int { body intrinsic; }
+
+    function RangeStart (range : Range) : Int { body intrinsic; }
+
+    function RangeStep (range : Range) : Int { body intrinsic; }
+
+    function RangeEnd (range : Range) : Int { body intrinsic; }
+
+    function RangeReverse (range : Range) : Range { body intrinsic; }
 }
 
 namespace Microsoft.Quantum.Targeting {
 
     @Attribute()
     newtype TargetInstruction = String;
-}
+}
\ No newline at end of file
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
index 92c336f6b0e..687e6dd1a1a 100644
--- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
@@ -6,8 +6,16 @@
 #include <iostream>
 
 #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
 #include "catch.hpp"
 
+#include "tracer.hpp"
+
+using namespace std;
+using namespace Microsoft::Quantum;
+
-extern "C" int To_Be_Generated(); // NOLINT
+extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT
 TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]")
 {
-    REQUIRE(0 == To_Be_Generated());
+    shared_ptr<CTracer> tr = CreateTracer();
+    SetSimulatorForQIR(tr.get());
+
+    REQUIRE(Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body());
 }
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs b/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
index 70fb2da71c3..c0d1fe4bec0 100644
--- a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
@@ -3,133 +3,213 @@
 
 namespace Microsoft.Quantum.Instructions {
 
-    operation S (qb : Qubit) : Unit {
+    open Microsoft.Quantum.Targeting;
+
+    @TargetInstruction("single_qubit_op")
+    operation single_qubit_op (op_id: Int, duration: Int, qb : Qubit) : Unit {
         body intrinsic;
     }
 
-    operation Rx (theta : Double, qb : Qubit) : Unit {
+    @TargetInstruction("multi_qubit_op")
+    operation multi_qubit_op (op_id: Int, duration: Int, qbs : Qubit[]) : Unit {
         body intrinsic;
     }
 
-    operation Rz (theta : Double, qb : Qubit) : Unit {
+    @TargetInstruction("single_qubit_op_ctl")
+    operation single_qubit_op_ctl (op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit {
         body intrinsic;
     }
+
+    @TargetInstruction("multi_qubit_op_ctl")
+    operation multi_qubit_op_ctl (op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit {
+        body intrinsic;
+    }
 }
 
+
 namespace Microsoft.Quantum.Intrinsic {
 
-    open 
Microsoft.Quantum.Targeting; - open Microsoft.Quantum.Instructions as Phys; - - @Inline() - function PI() : Double - { - return 3.14159265357989; - } - - function IntAsDouble(i : Int) : Double { + @TargetInstruction("multi_qubit_op_ctl") + operation multi_qubit_op_ctl (op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { body intrinsic; } +} - operation X(qb : Qubit) : Unit - is Adj { - body intrinsic; - adjoint self; - } - operation Z(qb : Qubit) : Unit - is Adj { - body intrinsic; - adjoint self; - } +namespace Microsoft.Quantum.Intrinsic { - operation H(qb : Qubit) : Unit - is Adj { - body intrinsic; - adjoint self; - } + open Microsoft.Quantum.Core; + open Microsoft.Quantum.Instructions as Phys; - operation T(qb : Qubit) : Unit - is Adj { - body intrinsic; - } + @Inline() + operation X(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(0, 1, qb); } + adjoint (...) { Phys.single_qubit_op(0, 1, qb); } + controlled (ctls, ...) + { + if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(2, 1, ctls, qb); } + } + controlled adjoint (ctls, ...) + { + if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(2, 1, ctls, qb); } + } + } operation CNOT(control : Qubit, target : Qubit) : Unit is Adj { - body intrinsic; - adjoint self; - } - - @TargetInstruction("mz") - operation M(qb : Qubit) : Result { - body intrinsic; - } - - operation Measure(bases : Pauli[], qubits : Qubit[]) : Result { - body intrinsic; + body (...) { Controlled X([control], target); } + adjoint (...) { Controlled X([control], target); } } - operation MResetZ(qb : Qubit) : Result - { - let res = M(qb); - if (res == One) + @Inline() + operation Y(qb : Qubit) : Unit + is Adj + Ctl{ + body (...) { Phys.single_qubit_op(3, 1, qb); } + adjoint (...) { Phys.single_qubit_op(3, 1, qb); } + controlled (ctls, ...) { - X(qb); + if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(5, 1, ctls, qb); } + } + controlled adjoint (ctls, ...) + { + if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(5, 1, ctls, qb); } } - return res; } @Inline() - operation S(qb : Qubit) : Unit - is Adj { - body (...) + operation Z(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(6, 1, qb); } + adjoint (...) { Phys.single_qubit_op(6, 1, qb); } + controlled (ctls, ...) { - Phys.S(qb); - } - adjoint (...) + if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(8, 1, ctls, qb); } + } + controlled adjoint (ctls, ...) { - Phys.S(qb); - Z(qb); + if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(8, 1, ctls, qb); } } - } + } @Inline() - operation Rx(theta : Double, qb : Qubit) : Unit - is Adj { - body (...) + operation H(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(9, 1, qb); } + adjoint (...) { Phys.single_qubit_op(9, 1, qb); } + controlled (ctls, ...) { - Phys.Rx(theta, qb); - } - adjoint (...) + Phys.single_qubit_op_ctl(10, 1, ctls, qb); + } + controlled adjoint (ctls, ...) { - Phys.Rx(-theta, qb); - } - } + Phys.single_qubit_op_ctl(10, 1, ctls, qb); + } + } @Inline() - operation Rz(theta : Double, qb : Qubit) : Unit + operation Tz(qb : Qubit) : Unit is Adj + Ctl { - body (...) + body (...) { Phys.single_qubit_op(11, 1, qb); } + adjoint (...) 
{ Phys.single_qubit_op(11, 1, qb); } + controlled (ctls, ...) { - Phys.Rz(theta, qb); - } - adjoint (...) + Phys.single_qubit_op_ctl(12, 1, ctls, qb); + } + controlled adjoint (ctls, ...) { - Phys.Rz(-theta, qb); - } + Phys.single_qubit_op_ctl(12, 1, ctls, qb); + } + } + + @Inline() + operation Tx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(13, 1, qb); } + adjoint (...) { Phys.single_qubit_op(13, 1, qb); } controlled (ctls, ...) { - Phys.Rz(theta / 2.0, qb); - CNOT(ctls[0], qb); - Phys.Rz(-theta / 2.0, qb); - CNOT(ctls[0], qb); + Phys.single_qubit_op_ctl(14, 1, ctls, qb); } controlled adjoint (ctls, ...) { - Phys.Rz(-theta / 2.0, qb); - CNOT(ctls[0], qb); - Phys.Rz(theta / 2.0, qb); - CNOT(ctls[0], qb); + Phys.single_qubit_op_ctl(14, 1, ctls, qb); } - } + } + + @Inline() + operation T(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Tz(qb); } + adjoint (...) { Tz(qb); } + controlled (ctls, ...) { Controlled Tz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Tz(ctls, qb); } + } + + @Inline() + operation Sz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(15, 1, qb); } + adjoint (...) { Phys.single_qubit_op(15, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(16, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(16, 1, ctls, qb); } + } + + @Inline() + operation Sx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(17, 1, qb); } + adjoint (...) { Phys.single_qubit_op(17, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(18, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(18, 1, ctls, qb); } + } + + @Inline() + operation S(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Sz(qb); } + adjoint (...) { Sz(qb); } + controlled (ctls, ...) { Controlled Sz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Sz(ctls, qb); } + } + + @Inline() + operation Rx(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(19, 1, qb); } + adjoint (...) { Phys.single_qubit_op(19, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(20, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(20, 1, ctls, qb); } + } + + @Inline() + operation Ry(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(21, 1, qb); } + adjoint (...) { Phys.single_qubit_op(21, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(22, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(22, 1, ctls, qb); } + } + + @Inline() + operation Rz(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(23, 1, qb); } + adjoint (...) { Phys.single_qubit_op(24, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + controlled adjoint (ctls, ...) 
{ Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + } + + + operation SWAP(a : Qubit, b : Qubit) : Unit + is Adj { + body intrinsic; + adjoint self; + } + + operation M(qb : Qubit) : Result { + body intrinsic; + } + + operation Measure(bases : Pauli[], qubits : Qubit[]) : Result { + body intrinsic; + } + } From b7bfcd53acf7c3512ce30559384f9a170184ad31 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Fri, 8 Jan 2021 12:15:24 -0800 Subject: [PATCH 04/27] Start implementing layering (non-zero single-qubit ops) --- src/QirRuntime/lib/Tracer/tracer-trc.cpp | 7 +- src/QirRuntime/lib/Tracer/tracer.cpp | 61 +++++++++++- src/QirRuntime/lib/Tracer/tracer.hpp | 94 ++++++++++++++++--- .../test/QIR-tracer/qir-tracer-driver.cpp | 13 ++- src/QirRuntime/test/unittests/CMakeLists.txt | 2 + src/QirRuntime/test/unittests/TracerTests.cpp | 79 +++++++++++++++- 6 files changed, 234 insertions(+), 22 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp index 9df5f97d85c..dfa7bd63047 100644 --- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp @@ -35,15 +35,16 @@ extern "C" void quantum__trc__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT { - tracer->TraceSingleQubitOp(id, duration, reinterpret_cast(target)); // NOLINT + tracer->TraceSingleQubitOp(id, duration, target); } void quantum__trc__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT { // TBD } - void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, Qubit* target) // NOLINT + void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, Qubit target) // NOLINT { - // TBD + tracer->TraceControlledSingleQubitOp( + id, duration, controls->count, reinterpret_cast(controls->buffer), target); } void quantum__trc__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, QirArray* targets) // NOLINT { diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 14b8803b3b6..f65c6895637 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -5,6 +5,8 @@ #include "tracer.hpp" +using namespace std; + namespace Microsoft { namespace Quantum @@ -16,9 +18,64 @@ namespace Quantum return tracer; } - std::unique_ptr CreateFullstateSimulator() + void CTracer::TraceSingleQubitOp(int32_t id, int32_t opDuration, Qubit target) + { + QubitState& qstate = this->qubits[reinterpret_cast(target)]; + if (opDuration == 0) + { + // TODO + } + else + { + // Figure out the layer this operation should go into. + int layerToInsertInto = INVALID; + if (qstate.layer != INVALID) + { + Layer& lastUsedIn = this->metricsByLayer[qstate.layer]; + if (qstate.lastUsedTime + opDuration <= lastUsedIn.startTime + lastUsedIn.duration) + { + layerToInsertInto = qstate.layer; + } + else if (opDuration <= this->preferredLayerDuration && qstate.layer + 1 < this->metricsByLayer.size()) + { + layerToInsertInto = qstate.layer + 1; + } + } + else if (opDuration <= this->preferredLayerDuration && !this->metricsByLayer.empty()) + { + // the qubit hasn't been used in any of the layers yet -- add it to the first layer + layerToInsertInto = 0; + } + + if (layerToInsertInto == INVALID) + { + // Create a new layer for the operation. 
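+                // (A new layer is appended at the end of the schedule: it starts where the
+                // last layer ends and is at least preferredLayerDuration wide.)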
+                Time layerStartTime = 0;
+                if (!this->metricsByLayer.empty())
+                {
+                    const Layer& lastLayer = this->metricsByLayer.back();
+                    layerStartTime = lastLayer.startTime + lastLayer.duration;
+                }
+                this->metricsByLayer.push_back(Layer{max(this->preferredLayerDuration, opDuration), layerStartTime});
+                layerToInsertInto = static_cast<int>(this->metricsByLayer.size()) - 1;
+            }
+
+            // Add the operation to the layer.
+            auto inserted = this->metricsByLayer[layerToInsertInto].operations.insert({id, 1});
+            if (!inserted.second)
+            {
+                assert(inserted.first->first == id);
+                inserted.first->second += 1;
+            }
+
+            // Update the qubit state.
+            qstate.layer = layerToInsertInto;
+            Time layerStart = this->metricsByLayer[layerToInsertInto].startTime;
+            qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration;
+        }
+    }
    void CTracer::TraceControlledSingleQubitOp(int32_t id, int32_t duration, int64_t nCtrls, Qubit* ctls, Qubit target)
    {
-        throw std::logic_error("Tracer should not instantiate full state simulator");
    }
} // namespace Quantum
} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index 2d914b5292d..af6988c58ce 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -2,6 +2,8 @@
 // Licensed under the MIT License.
 #include
+#include <unordered_map>
+#include <vector>
 #include "CoreTypes.hpp"
 #include "QuantumApi_I.hpp"
@@ -10,29 +12,66 @@
 namespace Microsoft
 {
 namespace Quantum
 {
-    /*======================================================================================================================
-      TracedQubit
-    ======================================================================================================================*/
-    struct TracedQubit
+    using OpId = int32_t;
+    using Time = int32_t;
+    using Duration = int32_t;
+
+    constexpr long INVALID = -1;
+
+    /*==================================================================================================================
+      Layer
+    ==================================================================================================================*/
+    struct Layer
     {
-        static const long INVALID = -1;
+        // Width of the layer on the time axis.
+        const Duration duration;
+
+        // Start time of the layer.
+        const Time startTime;
+
+        std::unordered_map<OpId, int> operations;
 
-        long id = INVALID;
+        Layer(Duration duration, Time startTime)
+            : duration(duration)
+            , startTime(startTime)
+        {
+        }
+    };
 
-    // Last layer the qubit was used in, `INVALID` means the qubit haven't been used yet in any operations of
-    // non-zero duration.
+    /*==================================================================================================================
+      QubitState
+    ==================================================================================================================*/
+    struct QubitState
+    {
+        // The last layer this qubit was used in, `INVALID` means the qubit hasn't been used yet in any
+        // operations of non-zero duration.
         int layer = INVALID;
+
+        // For layers with duration greater than one, multiple operations might fit on the same qubit, if the operations
+        // are short. `lastUsedTime` is the end time of the last operation the qubit participated in.
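+        // Example: with preferredLayerDuration == 3, a duration-1 op on a fresh qubit goes into layer
+        // L(0,3) and sets `lastUsedTime` to 1; a second duration-1 op on the same qubit still fits there,
+        // since 1 + 1 <= 0 + 3 (see the layering unit tests in TracerTests.cpp below).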
+ Time lastUsedTime = 0; + + std::vector pendingZeroOps; }; - /*====================================================================================================================== + /*================================================================================================================== The tracer implements resource estimation. See readme in this folder for details. - ======================================================================================================================*/ + ==================================================================================================================*/ class CTracer : public ISimulator { // Start with no reuse of qubits. - long lastQubitId = -1; + std::vector qubits; + + // The preferred duration of a layer. + int preferredLayerDuration = INVALID; + + // The index into the vector is treated as implicit id of the layer. + std::vector metricsByLayer; public: + // ------------------------------------------------------------------------------------------------------------- + // ISimulator interface + // ------------------------------------------------------------------------------------------------------------- IQuantumGateSet* AsQuantumGateSet() override { return nullptr; @@ -43,7 +82,9 @@ namespace Quantum } Qubit AllocateQubit() override { - return reinterpret_cast(++lastQubitId); + size_t qubit = qubits.size(); + qubits.push_back({}); + return reinterpret_cast(qubit); } void ReleaseQubit(Qubit qubit) override { @@ -82,9 +123,34 @@ namespace Quantum return reinterpret_cast(1); } - void TraceSingleQubitOp(int32_t id, int32_t duration, TracedQubit* target) + // ------------------------------------------------------------------------------------------------------------- + // Instead of implementing IQuantumGateSet, the tracer provides 'tracing-by-id' methods. The QIR generation + // should translate all intrinsics to invoke these methods. + // ------------------------------------------------------------------------------------------------------------- + void TraceSingleQubitOp(OpId id, Duration duration, Qubit target); + void TraceControlledSingleQubitOp(OpId id, Duration duration, int64_t nCtrls, Qubit* ctls, Qubit target); + void TraceMultiQubitOp(OpId id, Duration duration, int64_t nTargets, Qubit* targets); + void TraceControlledMultiQubitOp( + OpId id, + Duration duration, + int64_t nCtrls, + Qubit* ctls, + int64_t nTargets, + Qubit* targets); + + // ------------------------------------------------------------------------------------------------------------- + // Configuring the tracer and getting data back from it. + // ------------------------------------------------------------------------------------------------------------- + void SetPreferredLayerDuration(int dur) + { + this->preferredLayerDuration = dur; + } + + // Temporary method for initial testing + // TODO: replace with a safer accessor + const std::vector& UseLayers() { - // figure out the layering, etc. 
+ return this->metricsByLayer; } }; diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index 687e6dd1a1a..eb39f89542d 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -11,8 +11,19 @@ using namespace std; using namespace Microsoft::Quantum; +namespace Microsoft +{ +namespace Quantum +{ + std::unique_ptr CreateFullstateSimulator() + { + throw std::logic_error("Tracer should not instantiate full state simulator"); + } +} // namespace Quantum +} // namespace Microsoft + extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT -TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]") +TEST_CASE("Test that we are building the new components correctly", "[skip]") { shared_ptr tr = CreateTracer(); SetSimulatorForQIR(tr.get()); diff --git a/src/QirRuntime/test/unittests/CMakeLists.txt b/src/QirRuntime/test/unittests/CMakeLists.txt index d38b0f5745d..22b2bc2802f 100644 --- a/src/QirRuntime/test/unittests/CMakeLists.txt +++ b/src/QirRuntime/test/unittests/CMakeLists.txt @@ -12,12 +12,14 @@ target_link_libraries(qir-runtime-unittests PUBLIC qir-rt-support qir-qis-support simulators + tracer ) target_include_directories(qir-runtime-unittests PUBLIC "${test_includes}" ${public_includes} "${PROJECT_SOURCE_DIR}/lib/QIR" + "${PROJECT_SOURCE_DIR}/lib/Tracer" ) install(TARGETS qir-runtime-unittests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") add_unit_test(qir-runtime-unittests) diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index 0e71a85ef41..679d926cefd 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -3,7 +3,82 @@ #include "catch.hpp" -TEST_CASE("To check that we are build the new components", "[tracer]") +#include "CoreTypes.hpp" +#include "tracer.hpp" + +using namespace std; +using namespace Microsoft::Quantum; + +TEST_CASE("Layering distinct operations of non-zero durations", "[tracer]") +{ + shared_ptr tr = CreateTracer(); + tr->SetPreferredLayerDuration(3); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + tr->TraceSingleQubitOp(1, 1, q1); // L(0,3) should be created + tr->TraceSingleQubitOp(2, 2, q1); // add the op into L(0,3) + tr->TraceSingleQubitOp(3, 1, q2); // add the op into L(0,3) + tr->TraceSingleQubitOp(4, 3, q2); // create new layer L(3,3) + tr->TraceSingleQubitOp(5, 4, q2); // create new layer L(6,4) + tr->TraceSingleQubitOp(6, 2, q1); // add the op into L(3,3) + tr->TraceSingleQubitOp(7, 1, q3); // add the op into L(0,3) + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 3); + CHECK(layers[0].startTime == 0); + CHECK(layers[0].operations.size() == 4); + CHECK(layers[1].startTime == 3); + CHECK(layers[1].operations.size() == 2); + CHECK(layers[2].startTime == 6); + CHECK(layers[2].operations.size() == 1); +} + +TEST_CASE("Operations with same id are counted together", "[tracer]") { - REQUIRE(true); + shared_ptr tr = CreateTracer(); + tr->SetPreferredLayerDuration(3); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + // All of these ops should fit into a single layer L(0,3) + tr->TraceSingleQubitOp(1, 1, q1); + tr->TraceSingleQubitOp(2, 2, q1); + tr->TraceSingleQubitOp(1, 1, q2); + tr->TraceSingleQubitOp(2, 1, q2); + 
tr->TraceSingleQubitOp(1, 1, q2); + tr->TraceSingleQubitOp(3, 2, q3); + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 3); + const auto& ops = layers[0].operations; + CHECK(ops.find(1)->second == 3); + CHECK(ops.find(2)->second == 2); + CHECK(ops.find(3)->second == 1); +} + +TEST_CASE("Layering operations of zero duration", "[skip]") +{ + shared_ptr tr = CreateTracer(); + tr->SetPreferredLayerDuration(3); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + tr->TraceSingleQubitOp(1, 1, q1); // L(0,3) should be created + tr->TraceSingleQubitOp(2, 0, q1); // add the op into L(0,3) + tr->TraceSingleQubitOp(3, 0, q3); // pending zero op (will remain orphan) + tr->TraceSingleQubitOp(4, 0, q2); // pending zero op + tr->TraceSingleQubitOp(5, 0, q2); // another pending zero op + tr->TraceSingleQubitOp(6, 1, q2); // add the op into L(0,3) together with the pending ones + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 5); } \ No newline at end of file From 71fa200a11bfa2a998bdee0ae77f0ae582a1efb8 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Fri, 8 Jan 2021 14:32:56 -0800 Subject: [PATCH 05/27] Layering of zero-duration single qubit ops --- src/QirRuntime/lib/Tracer/tracer.cpp | 33 ++++++++++++++----- src/QirRuntime/lib/Tracer/tracer.hpp | 3 ++ src/QirRuntime/test/unittests/TracerTests.cpp | 2 +- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index f65c6895637..0893e9626ec 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -18,12 +18,29 @@ namespace Quantum return tracer; } - void CTracer::TraceSingleQubitOp(int32_t id, int32_t opDuration, Qubit target) + void CTracer::AddOperationToLayer(OpId id, size_t layer) + { + auto inserted = this->metricsByLayer[layer].operations.insert({id, 1}); + if (!inserted.second) + { + assert(inserted.first->first == id); + inserted.first->second += 1; + } + } + + void CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target) { QubitState& qstate = this->qubits[reinterpret_cast(target)]; if (opDuration == 0) { - // TODO + if (qstate.layer != INVALID) + { + AddOperationToLayer(id, qstate.layer); + } + else + { + qstate.pendingZeroOps.push_back(id); + } } else { @@ -60,18 +77,18 @@ namespace Quantum layerToInsertInto = static_cast(this->metricsByLayer.size()) - 1; } - // Add the operation to the layer. - auto inserted = this->metricsByLayer[layerToInsertInto].operations.insert({id, 1}); - if (!inserted.second) + // Add the operation and the pending zero-duration ones into the layer. + AddOperationToLayer(id, layerToInsertInto); + for (OpId idPending : qstate.pendingZeroOps) { - assert(inserted.first->first == id); - inserted.first->second += 1; + AddOperationToLayer(idPending, layerToInsertInto); } // Update the qubit state. 
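            // (the qubit now belongs to the chosen layer, and its busy time advances past the end of this op)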
qstate.layer = layerToInsertInto; - Time layerStart = this->metricsByLayer[layerToInsertInto].startTime; + const Time layerStart = this->metricsByLayer[layerToInsertInto].startTime; qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration; + qstate.pendingZeroOps.clear(); } } void CTracer::TraceControlledSingleQubitOp(int32_t id, int32_t duration, int64_t nCtrls, Qubit* ctls, Qubit target) diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp index af6988c58ce..660a4d05c06 100644 --- a/src/QirRuntime/lib/Tracer/tracer.hpp +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -68,6 +68,9 @@ namespace Quantum // The index into the vector is treated as implicit id of the layer. std::vector metricsByLayer; + private: + void AddOperationToLayer(OpId id, size_t layer); + public: // ------------------------------------------------------------------------------------------------------------- // ISimulator interface diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index 679d926cefd..b9e86a22819 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -62,7 +62,7 @@ TEST_CASE("Operations with same id are counted together", "[tracer]") CHECK(ops.find(3)->second == 1); } -TEST_CASE("Layering operations of zero duration", "[skip]") +TEST_CASE("Layering operations of zero duration", "[tracer]") { shared_ptr tr = CreateTracer(); tr->SetPreferredLayerDuration(3); From 94fe79064a032edebba62ee4b66d70b1ac471012 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Fri, 8 Jan 2021 16:17:36 -0800 Subject: [PATCH 06/27] Layering of controlled single qubit ops --- src/QirRuntime/lib/Tracer/tracer.cpp | 149 +++++++++++++----- src/QirRuntime/lib/Tracer/tracer.hpp | 33 +++- src/QirRuntime/test/unittests/TracerTests.cpp | 91 ++++++++--- 3 files changed, 203 insertions(+), 70 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 0893e9626ec..c59f0902f67 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -18,8 +18,57 @@ namespace Quantum return tracer; } - void CTracer::AddOperationToLayer(OpId id, size_t layer) + //------------------------------------------------------------------------------------------------------------------ + // CTracer::CreateNewLayer + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::CreateNewLayer(Duration opDuration) { + // Create a new layer for the operation. 
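+        // (This is the same logic that previously lived inline in TraceSingleQubitOp, factored out
+        // so that the controlled tracing paths added in this change can reuse it.)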
+ Time layerStartTime = 0; + if (!this->metricsByLayer.empty()) + { + const Layer& lastLayer = this->metricsByLayer.back(); + layerStartTime = lastLayer.startTime + lastLayer.duration; + } + this->metricsByLayer.push_back(Layer{max(this->preferredLayerDuration, opDuration), layerStartTime}); + return this->metricsByLayer.size() - 1; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::FindLayerToInsertOperationInto + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const + { + const QubitState& qstate = this->UseQubit(q); + + LayerId layerToInsertInto = INVALID; + if (qstate.layer != INVALID) + { + const Layer& lastUsedIn = this->metricsByLayer[qstate.layer]; + if (qstate.lastUsedTime + opDuration <= lastUsedIn.startTime + lastUsedIn.duration) + { + layerToInsertInto = qstate.layer; + } + else if (opDuration <= this->preferredLayerDuration && qstate.layer + 1 < this->metricsByLayer.size()) + { + layerToInsertInto = qstate.layer + 1; + } + } + else if (opDuration <= this->preferredLayerDuration && !this->metricsByLayer.empty()) + { + // the qubit hasn't been used in any of the layers yet -- add it to the first layer + layerToInsertInto = 0; + } + + return layerToInsertInto; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::AddOperationToLayer + //------------------------------------------------------------------------------------------------------------------ + void CTracer::AddOperationToLayer(OpId id, LayerId layer) + { + assert(layer < this->metricsByLayer.size()); auto inserted = this->metricsByLayer[layer].operations.insert({id, 1}); if (!inserted.second) { @@ -28,14 +77,35 @@ namespace Quantum } } + //------------------------------------------------------------------------------------------------------------------ + // CTracer::UpdateQubitState + //------------------------------------------------------------------------------------------------------------------ + void CTracer::UpdateQubitState(Qubit q, LayerId layer, Duration opDuration) + { + QubitState& qstate = this->UseQubit(q); + for (OpId idPending : qstate.pendingZeroOps) + { + this->AddOperationToLayer(idPending, layer); + } + + // Update the qubit state. + qstate.layer = layer; + const Time layerStart = this->metricsByLayer[layer].startTime; + qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration; + qstate.pendingZeroOps.clear(); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceSingleQubitOp + //------------------------------------------------------------------------------------------------------------------ void CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target) { - QubitState& qstate = this->qubits[reinterpret_cast(target)]; if (opDuration == 0) { + QubitState& qstate = this->UseQubit(target); if (qstate.layer != INVALID) { - AddOperationToLayer(id, qstate.layer); + this->AddOperationToLayer(id, qstate.layer); } else { @@ -45,54 +115,47 @@ namespace Quantum else { // Figure out the layer this operation should go into. 
- int layerToInsertInto = INVALID; - if (qstate.layer != INVALID) - { - Layer& lastUsedIn = this->metricsByLayer[qstate.layer]; - if (qstate.lastUsedTime + opDuration <= lastUsedIn.startTime + lastUsedIn.duration) - { - layerToInsertInto = qstate.layer; - } - else if (opDuration <= this->preferredLayerDuration && qstate.layer + 1 < this->metricsByLayer.size()) - { - layerToInsertInto = qstate.layer + 1; - } - } - else if (opDuration <= this->preferredLayerDuration && !this->metricsByLayer.empty()) - { - // the qubit hasn't been used in any of the layers yet -- add it to the first layer - layerToInsertInto = 0; - } - + LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration); if (layerToInsertInto == INVALID) { - // Create a new layer for the operation. - Time layerStartTime = 0; - if (!this->metricsByLayer.empty()) - { - const Layer& lastLayer = this->metricsByLayer.back(); - layerStartTime = lastLayer.startTime + lastLayer.duration; - } - this->metricsByLayer.push_back(Layer{max(this->preferredLayerDuration, opDuration), layerStartTime}); - layerToInsertInto = static_cast(this->metricsByLayer.size()) - 1; + layerToInsertInto = this->CreateNewLayer(opDuration); } // Add the operation and the pending zero-duration ones into the layer. - AddOperationToLayer(id, layerToInsertInto); - for (OpId idPending : qstate.pendingZeroOps) - { - AddOperationToLayer(idPending, layerToInsertInto); - } - - // Update the qubit state. - qstate.layer = layerToInsertInto; - const Time layerStart = this->metricsByLayer[layerToInsertInto].startTime; - qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration; - qstate.pendingZeroOps.clear(); + this->AddOperationToLayer(id, layerToInsertInto); + this->UpdateQubitState(target, layerToInsertInto, opDuration); } } - void CTracer::TraceControlledSingleQubitOp(int32_t id, int32_t duration, int64_t nCtrls, Qubit* ctls, Qubit target) + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceControlledSingleQubitOp + //------------------------------------------------------------------------------------------------------------------ + void CTracer::TraceControlledSingleQubitOp(OpId id, Duration opDuration, int64_t nCtrls, Qubit* ctls, Qubit target) { + // Special-casing operations of duration zero enables potentially better reuse of qubits, when we'll start + // optimizing for circuit width. However, tracking _the same_ pending operation across _multiple_ qubits is + // tricky and not worth the effort, so we don't do it. + + // Figure out the layer this operation should go into. + LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration); + for (int64_t i = 0; i < nCtrls && layerToInsertInto != INVALID; i++) + { + layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(ctls[i], opDuration)); + } + if (layerToInsertInto == INVALID) + { + layerToInsertInto = this->CreateNewLayer(opDuration); + } + + // Add the operation into the layer. + this->AddOperationToLayer(id, layerToInsertInto); + + // Update the state of the involved qubits. 
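+        // Controls and target are layered identically: every involved qubit is moved into the chosen
+        // layer, and its busy time advances by the operation's duration.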
+        this->UpdateQubitState(target, layerToInsertInto, opDuration);
+        for (int64_t i = 0; i < nCtrls; i++)
+        {
+            this->UpdateQubitState(ctls[i], layerToInsertInto, opDuration);
+        }
    }
} // namespace Quantum
} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index 660a4d05c06..0435b02921c 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -1,6 +1,7 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
+#include
 #include
 #include
 #include
 #include "CoreTypes.hpp"
 #include "QuantumApi_I.hpp"
@@ -15,8 +16,9 @@ namespace Quantum
     using OpId = int32_t;
     using Time = int32_t;
     using Duration = int32_t;
+    using LayerId = size_t;
 
-    constexpr long INVALID = -1;
+    constexpr LayerId INVALID = std::numeric_limits<LayerId>::max();
 
     /*==================================================================================================================
       Layer
@@ -45,7 +47,7 @@
     {
         // The last layer this qubit was used in, `INVALID` means the qubit hasn't been used yet in any
         // operations of non-zero duration.
-        int layer = INVALID;
+        LayerId layer = INVALID;
 
         // For layers with duration greater than one, multiple operations might fit on the same qubit, if the operations
         // are short. `lastUsedTime` is the end time of the last operation the qubit participated in.
@@ -63,13 +65,36 @@
         std::vector<QubitState> qubits;
 
         // The preferred duration of a layer.
-        int preferredLayerDuration = INVALID;
+        int preferredLayerDuration = 0;
 
         // The index into the vector is treated as the implicit id of the layer.
         std::vector<Layer> metricsByLayer;
 
     private:
-        void AddOperationToLayer(OpId id, size_t layer);
+        QubitState& UseQubit(Qubit q)
+        {
+            size_t qubitIndex = reinterpret_cast<size_t>(q);
+            assert(qubitIndex < this->qubits.size());
+            return this->qubits[qubitIndex];
+        }
+        const QubitState& UseQubit(Qubit q) const
+        {
+            size_t qubitIndex = reinterpret_cast<size_t>(q);
+            assert(qubitIndex < this->qubits.size());
+            return this->qubits[qubitIndex];
+        }
+
+        // If no appropriate layer is found, returns `INVALID`.
+        LayerId FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const;
+
+        // Returns the index of the created layer.
+        LayerId CreateNewLayer(Duration opDuration);
+
+        // Adds the operation with the given id into the given layer. Assumes that duration constraints have been satisfied.
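+        // (Callers obtain a valid layer id first, via FindLayerToInsertOperationInto or CreateNewLayer;
+        // see TraceSingleQubitOp for the typical sequence.)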
+ void AddOperationToLayer(OpId id, LayerId layer); + + // Update the qubit state with the new layer information + void UpdateQubitState(Qubit q, LayerId layer, Duration opDuration); public: // ------------------------------------------------------------------------------------------------------------- diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index b9e86a22819..bc3fd976e7d 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -9,7 +9,7 @@ using namespace std; using namespace Microsoft::Quantum; -TEST_CASE("Layering distinct operations of non-zero durations", "[tracer]") +TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[tracer]") { shared_ptr tr = CreateTracer(); tr->SetPreferredLayerDuration(3); @@ -24,7 +24,7 @@ TEST_CASE("Layering distinct operations of non-zero durations", "[tracer]") tr->TraceSingleQubitOp(4, 3, q2); // create new layer L(3,3) tr->TraceSingleQubitOp(5, 4, q2); // create new layer L(6,4) tr->TraceSingleQubitOp(6, 2, q1); // add the op into L(3,3) - tr->TraceSingleQubitOp(7, 1, q3); // add the op into L(0,3) + tr->TraceSingleQubitOp(7, 1, q3); // add the op into L(0,3) const vector& layers = tr->UseLayers(); REQUIRE(layers.size() == 3); @@ -36,6 +36,72 @@ TEST_CASE("Layering distinct operations of non-zero durations", "[tracer]") CHECK(layers[2].operations.size() == 1); } +TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]") +{ + shared_ptr tr = CreateTracer(); + tr->SetPreferredLayerDuration(3); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + tr->TraceSingleQubitOp(1, 1, q1); // L(0,3) should be created + tr->TraceSingleQubitOp(2, 0, q1); // add the op into L(0,3) + tr->TraceSingleQubitOp(3, 0, q3); // pending zero op (will remain orphan) + tr->TraceSingleQubitOp(4, 0, q2); // pending zero op + tr->TraceSingleQubitOp(5, 0, q2); // another pending zero op + tr->TraceSingleQubitOp(6, 1, q2); // add the op into L(0,3) together with the pending ones + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 5); +} + +TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]") +{ + shared_ptr tr = CreateTracer(); + tr->SetPreferredLayerDuration(3); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + Qubit q5 = tr->AllocateQubit(); + Qubit q6 = tr->AllocateQubit(); + + tr->TraceControlledSingleQubitOp(1 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q1 /*ctls*/, q2 /*target*/); + tr->TraceSingleQubitOp(2 /*id*/, 2 /*dur*/, q2); + // q2 now is at the limit of the layer duration + + tr->TraceControlledSingleQubitOp(3 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q2 /*ctls*/, q3 /*target*/); + // because of q2 this should have created a new layer + + tr->TraceControlledSingleQubitOp(4 /*id*/, 0 /*dur*/, 1 /*nCtls*/, &q4 /*ctls*/, q5 /*target*/); + tr->TraceSingleQubitOp(5 /*id*/, 1 /*dur*/, q6); + tr->TraceControlledSingleQubitOp(6 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q1 /*ctls*/, q6 /*target*/); + // these ops should fall through into the first layer (notice no special handling of duration zero) + + tr->TraceControlledSingleQubitOp(7 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q3 /*ctls*/, q4 /*target*/); + // because of q3 should be added into the second layer + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() 
== 2); + + REQUIRE(layers[0].operations.size() == 5); + const auto& ops0 = layers[0].operations; + CHECK(ops0.find(1) != ops0.end()); + CHECK(ops0.find(2) != ops0.end()); + CHECK(ops0.find(4) != ops0.end()); + CHECK(ops0.find(5) != ops0.end()); + CHECK(ops0.find(6) != ops0.end()); + + CHECK(layers[1].operations.size() == 2); + const auto& ops1 = layers[1].operations; + CHECK(ops1.find(3) != ops1.end()); + CHECK(ops1.find(7) != ops1.end()); +} + +// TODO: add controlled and multi-qubit ops TEST_CASE("Operations with same id are counted together", "[tracer]") { shared_ptr tr = CreateTracer(); @@ -61,24 +127,3 @@ TEST_CASE("Operations with same id are counted together", "[tracer]") CHECK(ops.find(2)->second == 2); CHECK(ops.find(3)->second == 1); } - -TEST_CASE("Layering operations of zero duration", "[tracer]") -{ - shared_ptr tr = CreateTracer(); - tr->SetPreferredLayerDuration(3); - - Qubit q1 = tr->AllocateQubit(); - Qubit q2 = tr->AllocateQubit(); - Qubit q3 = tr->AllocateQubit(); - - tr->TraceSingleQubitOp(1, 1, q1); // L(0,3) should be created - tr->TraceSingleQubitOp(2, 0, q1); // add the op into L(0,3) - tr->TraceSingleQubitOp(3, 0, q3); // pending zero op (will remain orphan) - tr->TraceSingleQubitOp(4, 0, q2); // pending zero op - tr->TraceSingleQubitOp(5, 0, q2); // another pending zero op - tr->TraceSingleQubitOp(6, 1, q2); // add the op into L(0,3) together with the pending ones - - const vector& layers = tr->UseLayers(); - REQUIRE(layers.size() == 1); - CHECK(layers[0].operations.size() == 5); -} \ No newline at end of file From a7d3b7a4206b08b18ec3276f2c0b4eca5913400d Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Mon, 11 Jan 2021 08:01:49 -0800 Subject: [PATCH 07/27] Layering of multi-qubit ops --- src/QirRuntime/lib/Tracer/tracer-trc.cpp | 15 ++++---- src/QirRuntime/lib/Tracer/tracer.cpp | 38 +++++++++++++++---- src/QirRuntime/lib/Tracer/tracer.hpp | 16 ++++---- src/QirRuntime/test/unittests/TracerTests.cpp | 34 +++++++++++------ 4 files changed, 71 insertions(+), 32 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp index dfa7bd63047..5570bbd084b 100644 --- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp @@ -37,17 +37,18 @@ extern "C" { tracer->TraceSingleQubitOp(id, duration, target); } - void quantum__trc__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT + void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, Qubit target) // NOLINT { - // TBD + tracer->TraceMultiQubitOp(id, duration, ctls->count, reinterpret_cast(ctls->buffer), 1, &target); } - void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, Qubit target) // NOLINT + void quantum__trc__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT { - tracer->TraceControlledSingleQubitOp( - id, duration, controls->count, reinterpret_cast(controls->buffer), target); + tracer->TraceMultiQubitOp(id, duration, 0, nullptr, targets->count, reinterpret_cast(targets->buffer)); } - void quantum__trc__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* controls, QirArray* targets) // NOLINT + void quantum__trc__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, QirArray* targets) // NOLINT { - // TBD + tracer->TraceMultiQubitOp( + id, duration, ctls->count, reinterpret_cast(ctls->buffer), targets->count, + reinterpret_cast(targets->buffer)); } } \ No newline at end of file diff --git 
a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp
index c59f0902f67..1605d650b42 100644
--- a/src/QirRuntime/lib/Tracer/tracer.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer.cpp
@@ -130,17 +130,38 @@ namespace Quantum
    //------------------------------------------------------------------------------------------------------------------
    // CTracer::TraceMultiQubitOp
    //------------------------------------------------------------------------------------------------------------------
-    void CTracer::TraceControlledSingleQubitOp(OpId id, Duration opDuration, int64_t nCtrls, Qubit* ctls, Qubit target)
+    void CTracer::TraceMultiQubitOp(
+        OpId id,
+        Duration opDuration,
+        int64_t nFirstGroup,
+        Qubit* firstGroup,
+        int64_t nSecondGroup,
+        Qubit* secondGroup)
    {
+        assert(nFirstGroup >= 0);
+        assert(nSecondGroup > 0);
+
+        // Operations that involve a single qubit can special-case duration zero.
+        if (nFirstGroup == 0 && nSecondGroup == 1)
+        {
+            this->TraceSingleQubitOp(id, opDuration, secondGroup[0]);
+            return;
+        }
+
        // Special-casing operations of duration zero enables potentially better reuse of qubits once we start
        // optimizing for circuit width. However, tracking _the same_ pending operation across _multiple_ qubits is
        // tricky and not worth the effort, so we don't do it.

        // Figure out the layer this operation should go into.
-        LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration);
-        for (int64_t i = 0; i < nCtrls && layerToInsertInto != INVALID; i++)
+        LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(secondGroup[0], opDuration);
+        for (int64_t i = 1; i < nSecondGroup && layerToInsertInto != INVALID; i++)
        {
-            layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(ctls[i], opDuration));
+            layerToInsertInto =
+                max(layerToInsertInto, this->FindLayerToInsertOperationInto(secondGroup[i], opDuration));
+        }
+        for (int64_t i = 0; i < nFirstGroup && layerToInsertInto != INVALID; i++)
+        {
+            layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(firstGroup[i], opDuration));
        }
        if (layerToInsertInto == INVALID)
        {
@@ -151,10 +172,13 @@ namespace Quantum
        this->AddOperationToLayer(id, layerToInsertInto);

        // Update the state of the involved qubits.
-        this->UpdateQubitState(target, layerToInsertInto, opDuration);
-        for (int64_t i = 0; i < nCtrls; i++)
+        for (int64_t i = 0; i < nFirstGroup; i++)
+        {
+            this->UpdateQubitState(firstGroup[i], layerToInsertInto, opDuration);
+        }
+        for (int64_t i = 0; i < nSecondGroup; i++)
        {
-            this->UpdateQubitState(ctls[i], layerToInsertInto, opDuration);
+            this->UpdateQubitState(secondGroup[i], layerToInsertInto, opDuration);
        }
    }
} // namespace Quantum
} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index 0435b02921c..7dae5ccb488 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -154,17 +154,19 @@ namespace Quantum
        // -------------------------------------------------------------------------------------------------------------
        // Instead of implementing IQuantumGateSet, the tracer provides 'tracing-by-id' methods. The QIR generation
        // should translate all intrinsics to invoke these methods.
+        // The tracer doesn't differentiate between control and target qubits. However, while it could provide a single
+        // generic tracing method for an array of qubits, that would require the clients to copy control and target
+        // qubits into the same array.
+        // To avoid the copy, the tracer provides a method that takes two groups of qubits,
+        // where the first one can be empty or can be viewed as the set of controls.
        // -------------------------------------------------------------------------------------------------------------
        void TraceSingleQubitOp(OpId id, Duration duration, Qubit target);
-        void TraceControlledSingleQubitOp(OpId id, Duration duration, int64_t nCtrls, Qubit* ctls, Qubit target);
-        void TraceMultiQubitOp(OpId id, Duration duration, int64_t nTargets, Qubit* targets);
-        void TraceControlledMultiQubitOp(
+        void TraceMultiQubitOp(
            OpId id,
            Duration duration,
-            int64_t nCtrls,
-            Qubit* ctls,
-            int64_t nTargets,
-            Qubit* targets);
+            int64_t nFirstGroup,
+            Qubit* firstGroup,
+            int64_t nSecondGroup,
+            Qubit* secondGroup);
diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp
index bc3fd976e7d..2e602b4f634 100644
--- a/src/QirRuntime/test/unittests/TracerTests.cpp
+++ b/src/QirRuntime/test/unittests/TracerTests.cpp
@@ -69,20 +69,29 @@ TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]")
    Qubit q5 = tr->AllocateQubit();
    Qubit q6 = tr->AllocateQubit();

-    tr->TraceControlledSingleQubitOp(1 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q1 /*ctls*/, q2 /*target*/);
-    tr->TraceSingleQubitOp(2 /*id*/, 2 /*dur*/, q2);
+    tr->TraceMultiQubitOp(1 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q2 /*second*/);
+    tr->TraceMultiQubitOp(2 /*id*/, 2 /*dur*/, 0 /*nFirst*/, nullptr /*first*/, 1 /*nSecond*/, &q2 /*second*/);
    // q2 now is at the limit of the layer duration

-    tr->TraceControlledSingleQubitOp(3 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q2 /*ctls*/, q3 /*target*/);
-    // because of q2 this should have created a new layer
+    Qubit qs12[2] = {q1, q2};
+    tr->TraceMultiQubitOp(3 /*id*/, 1 /*dur*/, 0 /*nFirst*/, nullptr /*first*/, 2 /*nSecond*/, qs12 /*second*/);
+    tr->TraceMultiQubitOp(4 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/);
+    // because of q2, both ops should have been added to a new layer, which now "catches" q1, q2, q3

-    tr->TraceControlledSingleQubitOp(4 /*id*/, 0 /*dur*/, 1 /*nCtls*/, &q4 /*ctls*/, q5 /*target*/);
-    tr->TraceSingleQubitOp(5 /*id*/, 1 /*dur*/, q6);
-    tr->TraceControlledSingleQubitOp(6 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q1 /*ctls*/, q6 /*target*/);
+    tr->TraceMultiQubitOp(5 /*id*/, 0 /*dur*/, 1 /*nFirst*/, &q4 /*first*/, 1 /*nSecond*/, &q5 /*second*/);
+    tr->TraceSingleQubitOp(6 /*id*/, 1 /*dur*/, q6);
    // these ops should fall through into the first layer (notice no special handling of duration zero)

-    tr->TraceControlledSingleQubitOp(7 /*id*/, 1 /*dur*/, 1 /*nCtls*/, &q3 /*ctls*/, q4 /*target*/);
-    // because of q3 should be added into the second layer
+    tr->TraceMultiQubitOp(7 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q6 /*second*/);
+    tr->TraceMultiQubitOp(8 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q4 /*second*/);
+    // because of q1 and q3, these ops should be added into the second layer, which now has all but q5
+
+    tr->TraceSingleQubitOp(9, 1, q5);
+    // should fall through to the first layer
+
+    Qubit qs46[2] = {q4, q6};
+    tr->TraceMultiQubitOp(10 /*id*/, 1 /*dur*/, 3 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q5 /*second*/);
+    // because of q4, should be added into the second layer

    const vector<Layer>&
layers = tr->UseLayers(); REQUIRE(layers.size() == 2); @@ -91,14 +100,17 @@ TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]") const auto& ops0 = layers[0].operations; CHECK(ops0.find(1) != ops0.end()); CHECK(ops0.find(2) != ops0.end()); - CHECK(ops0.find(4) != ops0.end()); CHECK(ops0.find(5) != ops0.end()); CHECK(ops0.find(6) != ops0.end()); + CHECK(ops0.find(9) != ops0.end()); - CHECK(layers[1].operations.size() == 2); + CHECK(layers[1].operations.size() == 5); const auto& ops1 = layers[1].operations; CHECK(ops1.find(3) != ops1.end()); + CHECK(ops1.find(4) != ops1.end()); CHECK(ops1.find(7) != ops1.end()); + CHECK(ops1.find(8) != ops1.end()); + CHECK(ops1.find(10) != ops1.end()); } // TODO: add controlled and multi-qubit ops From bdb1fdea57e13c798031a116a987909cf2ab51a0 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Mon, 11 Jan 2021 11:16:08 -0800 Subject: [PATCH 08/27] Updated generated QIR for the test (still not quite right) --- .../test/QIR-tracer/qir-test-intrinsics.ll | 98 +++++++++++-------- .../test/QIR-tracer/qir-tracer-driver.cpp | 2 +- 2 files changed, 57 insertions(+), 43 deletions(-) diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll index fa5023fb43f..b95a35bf1dc 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll +++ b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll @@ -31,6 +31,7 @@ entry: define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { entry: +; call void @__quantum__rt__array_add_access(%Array* %ctls) %0 = call i64 @__quantum__rt__array_get_length(%Array* %ctls, i32 0) %1 = icmp eq i64 %0, 1 br i1 %1, label %then0__1, label %else__1 @@ -44,15 +45,21 @@ else__1: ; preds = %entry br label %continue__1 continue__1: ; preds = %else__1, %then0__1 +; call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } +declare void @__quantum__rt__array_add_access(%Array*) + declare i64 @__quantum__rt__array_get_length(%Array*, i32) declare void @__quantum__trc__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) +declare void @__quantum__rt__array_remove_access(%Array*) + define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { entry: +; call void @__quantum__rt__array_add_access(%Array* %ctls) %0 = call i64 @__quantum__rt__array_get_length(%Array* %ctls, i32 0) %1 = icmp eq i64 %0, 1 br i1 %1, label %then0__1, label %else__1 @@ -66,6 +73,7 @@ else__1: ; preds = %entry br label %continue__1 continue__1: ; preds = %else__1, %then0__1 +; call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } @@ -74,14 +82,15 @@ entry: %res = alloca i1 store i1 false, i1* %res %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) +; call void @__quantum__rt__array_add_access(%Array* %qs) %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %1 = bitcast i8* %0 to %Qubit** - %.qb = load %Qubit*, %Qubit** %1 - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %.qb) + %qb__inline__1 = load %Qubit*, %Qubit** %1 + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__1) %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %3 = bitcast i8* %2 to %Qubit** - %.qb1 = load %Qubit*, %Qubit** %3 - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %.qb1) + %qb__inline__2 = load %Qubit*, %Qubit** %3 + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__2) %c = call %Qubit* 
@__quantum__rt__qubit_allocate() %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) @@ -98,66 +107,71 @@ entry: call void @__quantum__rt__array_reference(%Array* %4) store %Qubit* %9, %Qubit** %13 %14 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 - %.ctls = load %Array*, %Array** %14 + %ctls__inline__3 = load %Array*, %Array** %14 +; call void @__quantum__rt__array_add_access(%Array* %ctls__inline__3) %15 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 1 - %.qb2 = load %Qubit*, %Qubit** %15 - %16 = call i64 @__quantum__rt__array_get_length(%Array* %.ctls, i32 0) + %qb__inline__3 = load %Qubit*, %Qubit** %15 + %16 = call i64 @__quantum__rt__array_get_length(%Array* %ctls__inline__3, i32 0) %17 = icmp eq i64 %16, 1 br i1 %17, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %.ctls, %Qubit* %.qb2) + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__3, %Qubit* %qb__inline__3) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %.ctls, %Qubit* %.qb2) + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__3, %Qubit* %qb__inline__3) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__qubit_release(%Qubit* %c) -; call void @__quantum__rt__array_unreference(%Array* %4) +; call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__3) + call void @__quantum__rt__array_unreference(%Array* %4) %18 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 %19 = load %Array*, %Array** %18 -; call void @__quantum__rt__array_unreference(%Array* %19) - %20 = bitcast { %Array*, %Qubit* }* %11 to %Tuple* -; call void @__quantum__rt__tuple_unreference(%Tuple* %20) + call void @__quantum__rt__array_unreference(%Array* %19) + call void @__quantum__rt__tuple_unreference(%Tuple* %10) + call void @__quantum__rt__qubit_release(%Qubit* %c) %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %22 = bitcast i8* %21 to %Qubit** - %23 = load %Qubit*, %Qubit** %22 - %24 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %25 = bitcast %Tuple* %24 to { %Array*, %Qubit* }* - %26 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 0 - %27 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 1 - store %Array* %cc, %Array** %26 +; call void @__quantum__rt__array_add_access(%Array* %cc) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %24 = bitcast %Tuple* %23 to { %Array*, %Qubit* }* + %25 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 0 + %26 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 1 + store %Array* %cc, %Array** %25 call void @__quantum__rt__array_reference(%Array* %cc) - store %Qubit* %23, %Qubit** %27 - %28 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 0 - 
%.ctls3 = load %Array*, %Array** %28 - %29 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 1 - %.qb4 = load %Qubit*, %Qubit** %29 - %30 = call i64 @__quantum__rt__array_get_length(%Array* %.ctls3, i32 0) - %31 = icmp eq i64 %30, 1 - br i1 %31, label %then0__2, label %else__2 + store %Qubit* %22, %Qubit** %26 + %27 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 0 + %ctls__inline__4 = load %Array*, %Array** %27 +; call void @__quantum__rt__array_add_access(%Array* %ctls__inline__4) + %28 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 1 + %qb__inline__4 = load %Qubit*, %Qubit** %28 + %29 = call i64 @__quantum__rt__array_get_length(%Array* %ctls__inline__4, i32 0) + %30 = icmp eq i64 %29, 1 + br i1 %30, label %then0__2, label %else__2 then0__2: ; preds = %continue__1 - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %.ctls3, %Qubit* %.qb4) + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__4, %Qubit* %qb__inline__4) br label %continue__2 else__2: ; preds = %continue__1 - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %.ctls3, %Qubit* %.qb4) + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__4, %Qubit* %qb__inline__4) br label %continue__2 continue__2: ; preds = %else__2, %then0__2 +; call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__4) + %31 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 0 + %32 = load %Array*, %Array** %31 + call void @__quantum__rt__array_unreference(%Array* %32) + call void @__quantum__rt__tuple_unreference(%Tuple* %23) call void @__quantum__rt__qubit_release_array(%Array* %cc) -; call void @__quantum__rt__array_unreference(%Array* %cc) - %32 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %25, i64 0, i32 0 - %33 = load %Array*, %Array** %32 -; call void @__quantum__rt__array_unreference(%Array* %33) - %34 = bitcast { %Array*, %Qubit* }* %25 to %Tuple* -; call void @__quantum__rt__tuple_unreference(%Tuple* %34) -; call void @__quantum__rt__qubit_release_array(%Array* %qs) -; call void @__quantum__rt__array_unreference(%Array* %qs) +; call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_unreference(%Array* %cc) + call void @__quantum__rt__qubit_release_array(%Array* %qs) +; call void @__quantum__rt__array_remove_access(%Array* %qs) + call void @__quantum__rt__array_unreference(%Array* %qs) ret i1 true } @@ -173,12 +187,12 @@ declare %Tuple* @__quantum__rt__tuple_create(i64) declare void @__quantum__rt__array_reference(%Array*) -declare void @__quantum__rt__qubit_release(%Qubit*) - declare void @__quantum__rt__array_unreference(%Array*) declare void @__quantum__rt__tuple_unreference(%Tuple*) +declare void @__quantum__rt__qubit_release(%Qubit*) + declare void @__quantum__rt__qubit_release_array(%Array*) attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index eb39f89542d..e5aa08c4de4 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -23,7 +23,7 @@ namespace Quantum } // namespace Microsoft extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT -TEST_CASE("Test that we are building the new components correctly", "[skip]") +TEST_CASE("Test that we are building the new 
components correctly", "[qir-tracer]") { shared_ptr tr = CreateTracer(); SetSimulatorForQIR(tr.get()); From 4868131de4bf497c95b656c927647f5402b3ed00 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Tue, 12 Jan 2021 15:07:35 -0800 Subject: [PATCH 09/27] Global barrier injection --- src/QirRuntime/lib/Tracer/bridge-trc.ll | 7 ++ src/QirRuntime/lib/Tracer/tracer-trc.cpp | 5 ++ src/QirRuntime/lib/Tracer/tracer.cpp | 55 ++++++++----- src/QirRuntime/lib/Tracer/tracer.hpp | 13 ++++ .../test/QIR-tracer/qir-test-intrinsics.qs | 78 ++++++++++--------- .../test/QIR-tracer/qir-tracer-driver.cpp | 2 +- .../test/QIR-tracer/qir-tracer-target.qs | 5 ++ src/QirRuntime/test/unittests/TracerTests.cpp | 40 +++++++++- 8 files changed, 144 insertions(+), 61 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/bridge-trc.ll index 725a701979b..6c155643a7b 100644 --- a/src/QirRuntime/lib/Tracer/bridge-trc.ll +++ b/src/QirRuntime/lib/Tracer/bridge-trc.ll @@ -25,6 +25,7 @@ declare void @quantum__trc__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT declare void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*) declare void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*) declare void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*) +declare void @quantum__trc__inject_global_barrier(i8* %name, i32 %duration) ;=============================================================================== ; quantum__trc namespace implementations @@ -58,3 +59,9 @@ define void @__quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* call void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %struct.QirArray* %qs) ret void } + +define void @__quantum__trc__inject_global_barrier(i8* %name, i32 %duration) +{ + call void @quantum__trc__inject_global_barrier(i8* %name, i32 %duration) + ret void +} \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp index 5570bbd084b..f2473187a6e 100644 --- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp @@ -51,4 +51,9 @@ extern "C" id, duration, ctls->count, reinterpret_cast(ctls->buffer), targets->count, reinterpret_cast(targets->buffer)); } + + void quantum__trc__inject_global_barrier(char* name, int32_t duration) // NOLINT + { + tracer->InjectGlobalBarrier(name, duration); + } } \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 1605d650b42..dc15c367650 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -31,6 +31,7 @@ namespace Quantum layerStartTime = lastLayer.startTime + lastLayer.duration; } this->metricsByLayer.push_back(Layer{max(this->preferredLayerDuration, opDuration), layerStartTime}); + return this->metricsByLayer.size() - 1; } @@ -60,6 +61,18 @@ namespace Quantum layerToInsertInto = 0; } + if (layerToInsertInto != INVALID && this->globalBarrier != INVALID) + { + if (this->globalBarrier + 1 == this->metricsByLayer.size()) + { + layerToInsertInto = INVALID; + } + else + { + layerToInsertInto = std::max(layerToInsertInto, this->globalBarrier + 1); + } + } + return layerToInsertInto; } @@ -100,31 +113,24 @@ namespace Quantum //------------------------------------------------------------------------------------------------------------------ void 
CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target)
     {
-        if (opDuration == 0)
+        QubitState& qstate = this->UseQubit(target);
+        if (opDuration == 0 &&
+            (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier)))
         {
-            QubitState& qstate = this->UseQubit(target);
-            if (qstate.layer != INVALID)
-            {
-                this->AddOperationToLayer(id, qstate.layer);
-            }
-            else
-            {
-                qstate.pendingZeroOps.push_back(id);
-            }
+            qstate.pendingZeroOps.push_back(id);
+            return;
         }
-        else
-        {
-            // Figure out the layer this operation should go into.
-            LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration);
-            if (layerToInsertInto == INVALID)
-            {
-                layerToInsertInto = this->CreateNewLayer(opDuration);
-            }
 
-            // Add the operation and the pending zero-duration ones into the layer.
-            this->AddOperationToLayer(id, layerToInsertInto);
-            this->UpdateQubitState(target, layerToInsertInto, opDuration);
+        // Figure out the layer this operation should go into.
+        LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration);
+        if (layerToInsertInto == INVALID)
+        {
+            layerToInsertInto = this->CreateNewLayer(opDuration);
+        }
+
+        // Add the operation and the pending zero-duration ones into the layer.
+        this->AddOperationToLayer(id, layerToInsertInto);
+        this->UpdateQubitState(target, layerToInsertInto, opDuration);
     }
 
     //------------------------------------------------------------------------------------------------------------------
@@ -181,5 +187,12 @@ namespace Quantum
             this->UpdateQubitState(secondGroup[i], layerToInsertInto, opDuration);
         }
     }
+
+    void CTracer::InjectGlobalBarrier(char* name, Duration duration)
+    {
+        LayerId layer = this->CreateNewLayer(duration);
+        this->metricsByLayer[layer].name = name;
+        this->globalBarrier = layer;
+    }
 } // namespace Quantum
 } // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index 7dae5ccb488..1fd0516e843 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -31,8 +31,12 @@ namespace Quantum
         // Start time of the layer.
         const Time startTime;
 
+        // Quantum operations assigned to this layer.
         std::unordered_map<OpId, int> operations;
 
+        // The layer's optional name (a global barrier might provide one).
+        std::string name;
+
         Layer(Duration duration, Time startTime)
             : duration(duration)
             , startTime(startTime)
@@ -70,6 +74,10 @@ namespace Quantum
         // The index into the vector is treated as implicit id of the layer.
         std::vector<Layer> metricsByLayer;
 
+        // The last global barrier injected by the user. No new operations can be added to the barrier or to any of the
+        // layers that preceded it, even if the new operations involve completely new qubits.
+        LayerId globalBarrier = INVALID;
+
     private:
         QubitState& UseQubit(Qubit q)
         {
@@ -168,6 +176,11 @@ namespace Quantum
             int64_t nSecondGroup,
             Qubit* secondGroup);
 
+        // -------------------------------------------------------------------------------------------------------------
+        // Backing of the rest of the bridge methods.
+        // -------------------------------------------------------------------------------------------------------------
+        void InjectGlobalBarrier(char* name, Duration duration);
+
         // -------------------------------------------------------------------------------------------------------------
         // Configuring the tracer and getting data back from it.
// ------------------------------------------------------------------------------------------------------------- diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs index 9491ea765bf..c05344c80f5 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs @@ -12,55 +12,57 @@ namespace Microsoft.Quantum.Testing.Tracer using (qs = Qubit[3]) { X(qs[0]); - // Y(qs[0]); - // Z(qs[1]); - // H(qs[1]); - // CNOT(qs[1], qs[2]); - // Rx(0.3, qs[0]); - // Ry(0.4, qs[1]); - // Rz(0.5, qs[2]); - // //SWAP(qs[0], qs[2]); - // S(qs[1]); - // T(qs[2]); + Y(qs[0]); + Z(qs[1]); + H(qs[1]); + CNOT(qs[1], qs[2]); + Rx(0.3, qs[0]); + Ry(0.4, qs[1]); + Rz(0.5, qs[2]); + //SWAP(qs[0], qs[2]); + S(qs[1]); + T(qs[2]); + + Barrier("foo", 1); Adjoint X(qs[0]); - // Adjoint Y(qs[0]); - // Adjoint Z(qs[1]); - // Adjoint H(qs[1]); - // Adjoint CNOT(qs[1], qs[2]); - // Adjoint Rx(0.3, qs[0]); - // Adjoint Ry(0.4, qs[1]); - // Adjoint Rz(0.5, qs[2]); - // //Adjoint SWAP(qs[0], qs[2]); - // Adjoint S(qs[1]); - // Adjoint T(qs[2]); + Adjoint Y(qs[0]); + Adjoint Z(qs[1]); + Adjoint H(qs[1]); + Adjoint CNOT(qs[1], qs[2]); + Adjoint Rx(0.3, qs[0]); + Adjoint Ry(0.4, qs[1]); + Adjoint Rz(0.5, qs[2]); + //Adjoint SWAP(qs[0], qs[2]); + Adjoint S(qs[1]); + Adjoint T(qs[2]); using (c = Qubit()) { Controlled X([c], (qs[0])); - // Controlled Y([c], (qs[0])); - // Controlled Z([c], (qs[1])); - // Controlled H([c], (qs[1])); - // Controlled Rx([c], (0.3, qs[0])); - // Controlled Ry([c], (0.4, qs[1])); - // Controlled Rz([c], (0.5, qs[2])); - // //Controlled SWAP([c], (qs[0], qs[2])); - // Controlled S([c], (qs[1])); - // Controlled T([c], (qs[2])); + Controlled Y([c], (qs[0])); + Controlled Z([c], (qs[1])); + Controlled H([c], (qs[1])); + Controlled Rx([c], (0.3, qs[0])); + Controlled Ry([c], (0.4, qs[1])); + Controlled Rz([c], (0.5, qs[2])); + //Controlled SWAP([c], (qs[0], qs[2])); + Controlled S([c], (qs[1])); + Controlled T([c], (qs[2])); } using (cc = Qubit[2]) { Controlled X(cc, (qs[0])); - // Controlled Y(cc, (qs[0])); - // Controlled Z(cc, (qs[1])); - // Controlled H(cc, (qs[1])); - // Controlled Rx(cc, (0.3, qs[0])); - // Controlled Ry(cc, (0.4, qs[1])); - // Controlled Rz(cc, (0.5, qs[2])); - // //Controlled SWAP(cc, (qs[0], qs[2])); - // Controlled S(cc, (qs[1])); - // Controlled T(cc, (qs[2])); + Controlled Y(cc, (qs[0])); + Controlled Z(cc, (qs[1])); + Controlled H(cc, (qs[1])); + Controlled Rx(cc, (0.3, qs[0])); + Controlled Ry(cc, (0.4, qs[1])); + Controlled Rz(cc, (0.5, qs[2])); + //Controlled SWAP(cc, (qs[0], qs[2])); + Controlled S(cc, (qs[1])); + Controlled T(cc, (qs[2])); } //set res = (M(qs[0]) == Measure([PauliY, PauliX], [qs[1], qs[2]])); diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index e5aa08c4de4..eb39f89542d 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -23,7 +23,7 @@ namespace Quantum } // namespace Microsoft extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT -TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]") +TEST_CASE("Test that we are building the new components correctly", "[skip]") { shared_ptr tr = CreateTracer(); SetSimulatorForQIR(tr.get()); diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs 
b/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
index c0d1fe4bec0..634c7642f2c 100644
--- a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs
@@ -197,6 +197,11 @@ namespace Microsoft.Quantum.Intrinsic {
         controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); }
     }
 
+    @TargetInstruction("inject_global_barrier")
+    operation Barrier(name : String, duration : Int) : Unit
+    {
+        body intrinsic;
+    }
     operation SWAP(a : Qubit, b : Qubit) : Unit
     is Adj
     {
diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp
index 2e602b4f634..cacafe862ba 100644
--- a/src/QirRuntime/test/unittests/TracerTests.cpp
+++ b/src/QirRuntime/test/unittests/TracerTests.cpp
@@ -113,7 +113,7 @@ TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]")
     CHECK(ops1.find(10) != ops1.end());
 }
 
-// TODO: add controlled and multi-qubit ops
+// TODO: add multi-qubit ops
 TEST_CASE("Operations with same id are counted together", "[tracer]")
 {
     shared_ptr<CTracer> tr = CreateTracer();
@@ -139,3 +139,41 @@
     CHECK(ops.find(2)->second == 2);
     CHECK(ops.find(3)->second == 1);
 }
+
+TEST_CASE("Global barrier", "[tracer]")
+{
+    shared_ptr<CTracer> tr = CreateTracer();
+    tr->SetPreferredLayerDuration(1);
+
+    Qubit q1 = tr->AllocateQubit();
+    Qubit q2 = tr->AllocateQubit();
+    Qubit q3 = tr->AllocateQubit();
+
+    tr->TraceSingleQubitOp(1, 1, q1);  // L(0,1) created
+    tr->InjectGlobalBarrier("foo", 1); // creates L(1,1)
+
+    tr->TraceMultiQubitOp(2 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/);
+    // the barrier shouldn't allow this op to fall through into L(0,1), so it should create L(2,1)
+
+    tr->TraceSingleQubitOp(3, 0, q1);
+    // the barrier shouldn't allow this op to fall through into L(0,1), so it should create a pending op
+
+    tr->TraceSingleQubitOp(4, 1, q1);
+    // should be added into L(2,1) together with the pending op `3`
+
+    const vector<Layer>& layers = tr->UseLayers();
+    REQUIRE(layers.size() == 3);
+    CHECK(layers[0].operations.size() == 1);
+    CHECK(layers[1].operations.size() == 0);
+    CHECK(layers[2].operations.size() == 3);
+
+    const auto& ops0 = layers[0].operations;
+    CHECK(ops0.find(1) != ops0.end());
+
+    CHECK(std::string("foo") == layers[1].name);
+
+    const auto& ops2 = layers[2].operations;
+    CHECK(ops2.find(2) != ops2.end());
+    CHECK(ops2.find(3) != ops2.end());
+    CHECK(ops2.find(4) != ops2.end());
+}
From fa82c23b8bdbc72eb23e481877a6ead11e588f14 Mon Sep 17 00:00:00 2001
From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com>
Date: Thu, 14 Jan 2021 15:55:42 -0800
Subject: [PATCH 10/27] Layering of measurements

---
 src/QirRuntime/lib/Tracer/bridge-trc.ll | 9 ++
 src/QirRuntime/lib/Tracer/tracer-trc.cpp | 20 ++--
 src/QirRuntime/lib/Tracer/tracer.cpp | 85 +++++++++++++++--
 src/QirRuntime/lib/Tracer/tracer.hpp | 60 +++++-------
 src/QirRuntime/test/unittests/TracerTests.cpp | 92 ++++++++++++-------
 5 files changed, 179 insertions(+), 87 deletions(-)

diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/bridge-trc.ll
index 6c155643a7b..1155c973c46 100644
--- a/src/QirRuntime/lib/Tracer/bridge-trc.ll
+++ b/src/QirRuntime/lib/Tracer/bridge-trc.ll
@@ -26,6 +26,7 @@ declare void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.
 declare void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*)
 declare void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*)
 declare void @quantum__trc__inject_global_barrier(i8* %name, i32 %duration)
+declare %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %.q)
 
 ;===============================================================================
 ; quantum__trc namespace implementations
@@ -64,4 +65,12 @@ define void @__quantum__trc__inject_global_barrier(i8* %name, i32 %duration)
 {
   call void @quantum__trc__inject_global_barrier(i8* %name, i32 %duration)
   ret void
+}
+
+define %Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %.q)
+{
+  %q = bitcast %Qubit* %.q to %class.QUBIT*
+  %r = call %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %q)
+  %.r = bitcast %class.RESULT* %r to %Result*
+  ret %Result* %.r
 }
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
index f2473187a6e..05e3fe1bb87 100644
--- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
@@ -29,31 +29,33 @@ extern "C"
     {
     }
 
-    void quantum__trc__global_barrier(const char* name) // NOLINT
-    {
-    }
-
     void quantum__trc__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT
     {
-        tracer->TraceSingleQubitOp(id, duration, target);
+        (void)tracer->TraceSingleQubitOp(id, duration, target);
     }
 
     void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, Qubit target) // NOLINT
     {
-        tracer->TraceMultiQubitOp(id, duration, ctls->count, reinterpret_cast<Qubit*>(ctls->buffer), 1, &target);
+        (void)tracer->TraceMultiQubitOp(id, duration, ctls->count, reinterpret_cast<Qubit*>(ctls->buffer), 1, &target);
     }
 
     void quantum__trc__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT
     {
-        tracer->TraceMultiQubitOp(id, duration, 0, nullptr, targets->count, reinterpret_cast<Qubit*>(targets->buffer));
+        (void)tracer->TraceMultiQubitOp(
+            id, duration, 0, nullptr, targets->count, reinterpret_cast<Qubit*>(targets->buffer));
     }
 
     void quantum__trc__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, QirArray* targets) // NOLINT
     {
-        tracer->TraceMultiQubitOp(
+        (void)tracer->TraceMultiQubitOp(
             id, duration, ctls->count, reinterpret_cast<Qubit*>(ctls->buffer), targets->count,
             reinterpret_cast<Qubit*>(targets->buffer));
     }
 
     void quantum__trc__inject_global_barrier(char* name, int32_t duration) // NOLINT
     {
-        tracer->InjectGlobalBarrier(name, duration);
+        (void)tracer->InjectGlobalBarrier(name, duration);
     }
+
+    RESULT* quantum__trc__single_qubit_measure(int32_t id, int32_t duration, QUBIT* q) // NOLINT
+    {
+        return tracer->TraceSingleQubitMeasurement(id, duration, q);
+    }
 }
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp
index dc15c367650..36ecd135622 100644
--- a/src/QirRuntime/lib/Tracer/tracer.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer.cpp
@@ -2,6 +2,7 @@
 // Licensed under the MIT License.
 #include <algorithm>
+#include <sstream>
 
 #include "tracer.hpp"
 
@@ -18,6 +19,51 @@ namespace Quantum
         return tracer;
     }
 
+    //------------------------------------------------------------------------------------------------------------------
+    // CTracer's ISimulator implementation
+    //------------------------------------------------------------------------------------------------------------------
+    IQuantumGateSet* CTracer::AsQuantumGateSet()
+    {
+        return nullptr;
+    }
+    IDiagnostics* CTracer::AsDiagnostics()
+    {
+        return nullptr;
+    }
+    Qubit CTracer::AllocateQubit()
+    {
+        size_t qubit = qubits.size();
+        qubits.push_back({});
+        return reinterpret_cast<Qubit>(qubit);
+    }
+    void CTracer::ReleaseQubit(Qubit /*qubit*/)
+    {
+        // nothing for now
+    }
+    std::string CTracer::QubitToString(Qubit q)
+    {
+        size_t qubitIndex = reinterpret_cast<size_t>(q);
+        const QubitState& qstate = this->UseQubit(q);
+
+        stringstream str;
+        str << qubitIndex;
+        str << " last used in layer " << qstate.layer << " (pending zero ops: " << qstate.pendingZeroOps.size() << ")";
+        return str.str();
+    }
+    void CTracer::ReleaseResult(Result /*result*/)
+    {
+        // nothing to do, we don't allocate results on measurement
+    }
+    // Although the tracer should never compare results or get their values, it still has to implement UseZero and
+    // UseOne methods, as they are invoked by the QIR initialization.
+    Result CTracer::UseZero()
+    {
+        return reinterpret_cast<Result>(INVALID);
+    }
+    Result CTracer::UseOne()
+    {
+        return reinterpret_cast<Result>(INVALID);
+    }
+
     //------------------------------------------------------------------------------------------------------------------
     // CTracer::CreateNewLayer
     //------------------------------------------------------------------------------------------------------------------
@@ -50,9 +96,16 @@
             {
                 layerToInsertInto = qstate.layer;
             }
-            else if (opDuration <= this->preferredLayerDuration && qstate.layer + 1 < this->metricsByLayer.size())
+            else
             {
-                layerToInsertInto = qstate.layer + 1;
+                for (LayerId candidate = qstate.layer + 1; candidate < this->metricsByLayer.size(); candidate++)
+                {
+                    if (opDuration <= this->metricsByLayer[candidate].duration)
+                    {
+                        layerToInsertInto = candidate;
+                        break;
+                    }
+                }
             }
         }
         else if (opDuration <= this->preferredLayerDuration && !this->metricsByLayer.empty())
@@ -111,14 +164,14 @@
     //------------------------------------------------------------------------------------------------------------------
     // CTracer::TraceSingleQubitOp
     //------------------------------------------------------------------------------------------------------------------
-    void CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target)
+    LayerId CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target)
     {
         QubitState& qstate = this->UseQubit(target);
         if (opDuration == 0 &&
             (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier)))
         {
             qstate.pendingZeroOps.push_back(id);
-            return;
+            return INVALID;
         }
 
         // Figure out the layer this operation should go into.
@@ -131,12 +184,14 @@
         // Add the operation and the pending zero-duration ones into the layer.
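+        // (Clarifying sketch, grounded in the unit tests rather than introduced by this change: the two calls
+        // below also fold the qubit's pending zero-duration ops into the chosen layer. For example, with a
+        // preferred layer duration of 3,
+        //     tr->TraceSingleQubitOp(2, 0, q); // q has no layer yet -> recorded in pendingZeroOps
+        //     tr->TraceSingleQubitOp(6, 1, q); // creates/joins L(0,3); the pending op 2 lands there as well
+        // yields a single layer containing both operations, as exercised by the "Layering single-qubit
+        // operations of zero duration" test.)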
         this->AddOperationToLayer(id, layerToInsertInto);
         this->UpdateQubitState(target, layerToInsertInto, opDuration);
+
+        return layerToInsertInto;
     }
 
     //------------------------------------------------------------------------------------------------------------------
     // CTracer::TraceMultiQubitOp
     //------------------------------------------------------------------------------------------------------------------
-    void CTracer::TraceMultiQubitOp(
+    LayerId CTracer::TraceMultiQubitOp(
         OpId id,
         Duration opDuration,
         int64_t nFirstGroup,
@@ -150,8 +205,7 @@
         // Operations that involve a single qubit can special case duration zero.
         if (nFirstGroup == 0 && nSecondGroup == 1)
         {
-            this->TraceSingleQubitOp(id, opDuration, secondGroup[0]);
-            return;
+            return this->TraceSingleQubitOp(id, opDuration, secondGroup[0]);
         }
 
         // Special-casing operations of duration zero enables potentially better reuse of qubits, when we'll start
@@ -186,13 +240,28 @@
         {
             this->UpdateQubitState(secondGroup[i], layerToInsertInto, opDuration);
         }
+
+        return layerToInsertInto;
     }
 
-    void CTracer::InjectGlobalBarrier(char* name, Duration duration)
+    LayerId CTracer::InjectGlobalBarrier(const char* name, Duration duration)
     {
         LayerId layer = this->CreateNewLayer(duration);
         this->metricsByLayer[layer].name = name;
         this->globalBarrier = layer;
+        return layer;
+    }
+
+    Result CTracer::TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target)
+    {
+        LayerId layerId = this->TraceSingleQubitOp(id, duration, target);
+        return reinterpret_cast<Result>(layerId);
+    }
+
+    Result CTracer::TraceMultiQubitMeasurement(OpId id, Duration duration, int64_t nTargets, Qubit* targets)
+    {
+        LayerId layerId = this->TraceMultiQubitOp(id, duration, 0, nullptr, nTargets, targets);
+        return reinterpret_cast<Result>(layerId);
     }
 } // namespace Quantum
 } // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index 1fd0516e843..fa7d0e6e3c4 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -108,28 +108,13 @@
         // -------------------------------------------------------------------------------------------------------------
         // ISimulator interface
         // -------------------------------------------------------------------------------------------------------------
-        IQuantumGateSet* AsQuantumGateSet() override
-        {
-            return nullptr;
-        }
-        IDiagnostics* AsDiagnostics() override
-        {
-            return nullptr;
-        }
-        Qubit AllocateQubit() override
-        {
-            size_t qubit = qubits.size();
-            qubits.push_back({});
-            return reinterpret_cast<Qubit>(qubit);
-        }
-        void ReleaseQubit(Qubit qubit) override
-        {
-            // nothing for now
-        }
-        std::string QubitToString(Qubit qubit) override
-        {
-            throw std::logic_error("not_implemented");
-        }
+        IQuantumGateSet* AsQuantumGateSet() override;
+        IDiagnostics* AsDiagnostics() override;
+        Qubit AllocateQubit() override;
+        void ReleaseQubit(Qubit qubit) override;
+        std::string QubitToString(Qubit qubit) override;
+        void ReleaseResult(Result result) override;
+
         Result M(Qubit target) override
         {
             throw std::logic_error("not_implemented");
         }
@@ -138,26 +123,16 @@
         {
             throw std::logic_error("not_implemented");
         }
-        void ReleaseResult(Result result) override
-        {
-            throw std::logic_error("not_implemented");
-        }
         bool AreEqualResults(Result r1, Result r2) override
         {
-            throw std::logic_error("not_implemented");
+            throw std::logic_error("Cannot compare results when tracing!");
         }
         ResultValue
GetResultValue(Result result) override { - throw std::logic_error("not_implemented"); - } - Result UseZero() override - { - return reinterpret_cast(0); - } - Result UseOne() override - { - return reinterpret_cast(1); + throw std::logic_error("Result values aren't available, when tracing!"); } + Result UseZero() override; + Result UseOne() override; // ------------------------------------------------------------------------------------------------------------- // Instead of implementing IQuantumGateSet, the tracer provides 'tracing-by-id' methods. The QIR generation @@ -167,8 +142,8 @@ namespace Quantum // qubits into the same array. To avoid the copy, the tracer provides a method that takes two groups of qubits, // where the first one can be empty or can be viewed as the set of controls. // ------------------------------------------------------------------------------------------------------------- - void TraceSingleQubitOp(OpId id, Duration duration, Qubit target); - void TraceMultiQubitOp( + LayerId TraceSingleQubitOp(OpId id, Duration duration, Qubit target); + LayerId TraceMultiQubitOp( OpId id, Duration duration, int64_t nFirstGroup, @@ -176,10 +151,17 @@ namespace Quantum int64_t nSecondGroup, Qubit* secondGroup); + Result TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target); + Result TraceMultiQubitMeasurement(OpId id, Duration duration, int64_t nTargets, Qubit* targets); + LayerId GetLayerIdOfSourceMeasurement(Result r) const + { + return reinterpret_cast(r); + } + // ------------------------------------------------------------------------------------------------------------- // Backing of the rest of the bridge methods. // ------------------------------------------------------------------------------------------------------------- - void InjectGlobalBarrier(char* name, Duration duration); + LayerId InjectGlobalBarrier(const char* name, Duration duration); // ------------------------------------------------------------------------------------------------------------- // Configuring the tracer and getting data back from it. diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index cacafe862ba..dbaed14852b 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -18,23 +18,26 @@ TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[t Qubit q2 = tr->AllocateQubit(); Qubit q3 = tr->AllocateQubit(); - tr->TraceSingleQubitOp(1, 1, q1); // L(0,3) should be created - tr->TraceSingleQubitOp(2, 2, q1); // add the op into L(0,3) - tr->TraceSingleQubitOp(3, 1, q2); // add the op into L(0,3) - tr->TraceSingleQubitOp(4, 3, q2); // create new layer L(3,3) - tr->TraceSingleQubitOp(5, 4, q2); // create new layer L(6,4) - tr->TraceSingleQubitOp(6, 2, q1); // add the op into L(3,3) - tr->TraceSingleQubitOp(7, 1, q3); // add the op into L(0,3) + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created + CHECK(0 == tr->TraceSingleQubitOp(2, 2, q1)); // add the op into L(0,3) + CHECK(0 == tr->TraceSingleQubitOp(3, 1, q2)); // add the op into L(0,3) + CHECK(1 == tr->TraceSingleQubitOp(4, 3, q2)); // create new layer L(3,3) + CHECK(2 == tr->TraceSingleQubitOp(5, 4, q2)); // long op! create new layer L(6,4) + CHECK(1 == tr->TraceSingleQubitOp(6, 2, q1)); // add the op into L(3,3) + CHECK(0 == tr->TraceSingleQubitOp(7, 1, q3)); // add the op into L(0,3) + CHECK(2 == tr->TraceSingleQubitOp(8, 4, q3)); // long op! 
but fits into existing L(6,4)
+    CHECK(3 == tr->TraceSingleQubitOp(9, 5, q1)); // long op! add the op into L(10,5)
 
     const vector<Layer>& layers = tr->UseLayers();
-    REQUIRE(layers.size() == 3);
+    REQUIRE(layers.size() == 4);
     CHECK(layers[0].startTime == 0);
     CHECK(layers[0].operations.size() == 4);
     CHECK(layers[1].startTime == 3);
     CHECK(layers[1].operations.size() == 2);
     CHECK(layers[2].startTime == 6);
-    CHECK(layers[2].operations.size() == 1);
-}
+    CHECK(layers[2].operations.size() == 2);
+    CHECK(layers[3].startTime == 10);
+    CHECK(layers[3].operations.size() == 1);
+}
 
 TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]")
 {
@@ -45,12 +48,12 @@
     Qubit q2 = tr->AllocateQubit();
     Qubit q3 = tr->AllocateQubit();
 
-    tr->TraceSingleQubitOp(1, 1, q1); // L(0,3) should be created
-    tr->TraceSingleQubitOp(2, 0, q1); // add the op into L(0,3)
-    tr->TraceSingleQubitOp(3, 0, q3); // pending zero op (will remain orphan)
-    tr->TraceSingleQubitOp(4, 0, q2); // pending zero op
-    tr->TraceSingleQubitOp(5, 0, q2); // another pending zero op
-    tr->TraceSingleQubitOp(6, 1, q2); // add the op into L(0,3) together with the pending ones
+    CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created
+    CHECK(0 == tr->TraceSingleQubitOp(2, 0, q1)); // add the op into L(0,3)
+    CHECK(INVALID == tr->TraceSingleQubitOp(3, 0, q3)); // pending zero op (will remain orphan)
+    CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q2)); // pending zero op
+    CHECK(INVALID == tr->TraceSingleQubitOp(5, 0, q2)); // another pending zero op
+    CHECK(0 == tr->TraceSingleQubitOp(6, 1, q2)); // add the op into L(0,3) together with the pending ones
 
     const vector<Layer>& layers = tr->UseLayers();
     REQUIRE(layers.size() == 1);
@@ -69,28 +72,28 @@
     Qubit q5 = tr->AllocateQubit();
     Qubit q6 = tr->AllocateQubit();
 
-    tr->TraceMultiQubitOp(1 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q2 /*second*/);
-    tr->TraceMultiQubitOp(2 /*id*/, 2 /*dur*/, 0 /*nFirst*/, nullptr /*first*/, 1 /*nSecond*/, &q2 /*second*/);
+    CHECK(0 == tr->TraceMultiQubitOp(1, 1, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q2 /*second*/));
+    CHECK(0 == tr->TraceMultiQubitOp(2, 2, 0 /*nFirst*/, nullptr /*first*/, 1 /*nSecond*/, &q2 /*second*/));
     // q2 now is at the limit of the layer duration
 
     Qubit qs12[2] = {q1, q2};
-    tr->TraceMultiQubitOp(3 /*id*/, 1 /*dur*/, 0 /*nFirst*/, nullptr /*first*/, 2 /*nSecond*/, qs12 /*second*/);
-    tr->TraceMultiQubitOp(4 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/);
+    CHECK(1 == tr->TraceMultiQubitOp(3, 1, 0 /*nFirst*/, nullptr /*first*/, 2 /*nSecond*/, qs12 /*second*/));
+    CHECK(1 == tr->TraceMultiQubitOp(4, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/));
     // because of q2, both ops should have been added to a new layer, which now "catches" q1, q2, q3
 
-    tr->TraceMultiQubitOp(5 /*id*/, 0 /*dur*/, 1 /*nFirst*/, &q4 /*first*/, 1 /*nSecond*/, &q5 /*second*/);
-    tr->TraceSingleQubitOp(6 /*id*/, 1 /*dur*/, q6);
+    CHECK(0 == tr->TraceMultiQubitOp(5, 0, 1 /*nFirst*/, &q4 /*first*/, 1 /*nSecond*/, &q5 /*second*/));
+    CHECK(0 == tr->TraceSingleQubitOp(6, 1, q6));
     // these ops should fall through into the first layer (notice no special handling of duration zero)
 
-    tr->TraceMultiQubitOp(7 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q6 /*second*/);
-    tr->TraceMultiQubitOp(8 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q4 /*second*/);
+    CHECK(1 == tr->TraceMultiQubitOp(7, 1, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q6 /*second*/));
+    CHECK(1 == tr->TraceMultiQubitOp(8, 1, 1 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q4 /*second*/));
     // because of q1 and q3, these ops should be added into the second layer, which now has all but q5
 
-    tr->TraceSingleQubitOp(9, 1, q5);
+    CHECK(0 == tr->TraceSingleQubitOp(9, 1, q5));
     // should fall through to the first layer
 
     Qubit qs46[2] = {q4, q6};
-    tr->TraceMultiQubitOp(10 /*id*/, 1 /*dur*/, 3 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q5 /*second*/);
+    CHECK(1 == tr->TraceMultiQubitOp(10, 1, 3 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q5 /*second*/));
    // because of q4, should be added into the second layer
 
     const vector<Layer>& layers = tr->UseLayers();
@@ -149,16 +152,16 @@
     Qubit q2 = tr->AllocateQubit();
     Qubit q3 = tr->AllocateQubit();
 
-    tr->TraceSingleQubitOp(1, 1, q1); // L(0,1) created
-    tr->InjectGlobalBarrier("foo", 1); // creates L(1,1)
+    CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,1) created
+    CHECK(1 == tr->InjectGlobalBarrier("foo", 1)); // creates L(1,1)
 
-    tr->TraceMultiQubitOp(2 /*id*/, 1 /*dur*/, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/);
+    CHECK(2 == tr->TraceMultiQubitOp(2, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/));
     // the barrier shouldn't allow this op to fall through into L(0,1), so it should create L(2,1)
 
-    tr->TraceSingleQubitOp(3, 0, q1);
+    CHECK(INVALID == tr->TraceSingleQubitOp(3, 0, q1));
     // the barrier shouldn't allow this op to fall through into L(0,1), so it should create a pending op
 
-    tr->TraceSingleQubitOp(4, 1, q1);
+    CHECK(2 == tr->TraceSingleQubitOp(4, 1, q1));
     // should be added into L(2,1) together with the pending op `3`
 
     const vector<Layer>& layers = tr->UseLayers();
@@ -177,3 +180,30 @@
     CHECK(ops2.find(3) != ops2.end());
     CHECK(ops2.find(4) != ops2.end());
 }
+
+// For layering purposes, measurements behave pretty much the same as other operations
+TEST_CASE("Layering measurements", "[tracer]")
+{
+    shared_ptr<CTracer> tr = CreateTracer();
+    tr->SetPreferredLayerDuration(1);
+
+    Qubit q1 = tr->AllocateQubit();
+    Qubit q2 = tr->AllocateQubit();
+    Qubit q3 = tr->AllocateQubit();
+    Qubit q4 = tr->AllocateQubit();
+
+    CHECK(0 == tr->GetLayerIdOfSourceMeasurement(tr->TraceSingleQubitMeasurement(1, 1, q1)));
+    Qubit qs12[2] = {q1, q2};
+    CHECK(1 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(2, 1, 2, qs12)));
+    CHECK(0 == tr->TraceSingleQubitOp(3, 1, q4));
+    CHECK(0 == tr->GetLayerIdOfSourceMeasurement(tr->TraceSingleQubitMeasurement(4, 1, q3)));
+    Qubit qs23[2] = {q2, q3};
+    CHECK(2 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(5, 1, 2, qs23)));
+    CHECK(1 == tr->TraceSingleQubitOp(3, 1, q4));
+
+    const vector<Layer>& layers = tr->UseLayers();
+    REQUIRE(layers.size() == 3);
+    CHECK(layers[0].operations.size() == 3);
+    CHECK(layers[1].operations.size() == 2);
+    CHECK(layers[2].operations.size() == 1);
+}
\ No newline at end of file
From cfb9a0982cc9e4cdeefdb3f6790bc4cb1d7d0fcd Mon Sep 17 00:00:00 2001
From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com>
Date: Thu, 14 Jan 2021 17:28:14 -0800
Subject: [PATCH 11/27] Simple Q# test to run against the tracer

---
 src/QirRuntime/lib/Tracer/bridge-trc.ll | 6 +-
 src/QirRuntime/lib/Tracer/tracer-trc.cpp | 4 +-
 src/QirRuntime/lib/Tracer/tracer.cpp | 4 +-
 src/QirRuntime/lib/Tracer/tracer.hpp | 6 +-
 src/QirRuntime/test/QIR-tracer/CMakeLists.txt | 2 +-
src/QirRuntime/test/QIR-tracer/generate.py | 32 + .../test/QIR-tracer/qir-test-intrinsics.ll | 198 ---- .../test/QIR-tracer/qir-tracer-driver.cpp | 4 +- .../{qir-tracer-core.qs => tracer-core.qs} | 0 .../test/QIR-tracer/tracer-intrinsics.ll | 848 ++++++++++++++++++ ...est-intrinsics.qs => tracer-intrinsics.qs} | 17 +- ...{qir-tracer-target.qs => tracer-target.qs} | 3 +- src/QirRuntime/test/unittests/TracerTests.cpp | 4 +- 13 files changed, 913 insertions(+), 215 deletions(-) create mode 100644 src/QirRuntime/test/QIR-tracer/generate.py delete mode 100644 src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll rename src/QirRuntime/test/QIR-tracer/{qir-tracer-core.qs => tracer-core.qs} (100%) create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll rename src/QirRuntime/test/QIR-tracer/{qir-test-intrinsics.qs => tracer-intrinsics.qs} (84%) rename src/QirRuntime/test/QIR-tracer/{qir-tracer-target.qs => tracer-target.qs} (98%) diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/bridge-trc.ll index 1155c973c46..081f9d03372 100644 --- a/src/QirRuntime/lib/Tracer/bridge-trc.ll +++ b/src/QirRuntime/lib/Tracer/bridge-trc.ll @@ -25,7 +25,7 @@ declare void @quantum__trc__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT declare void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*) declare void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*) declare void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*) -declare void @quantum__trc__inject_global_barrier(i8* %name, i32 %duration) +declare void @quantum__trc__inject_global_barrier(i32 %id, i32 %duration) declare %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %.q) ;=============================================================================== @@ -61,9 +61,9 @@ define void @__quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* ret void } -define void @__quantum__trc__inject_global_barrier(i8* %name, i32 %duration) +define void @__quantum__trc__inject_global_barrier(i32 %id, i32 %duration) { - call void @quantum__trc__inject_global_barrier(i8* %name, i32 %duration) + call void @quantum__trc__inject_global_barrier(i32 %id, i32 %duration) ret void } diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp index 05e3fe1bb87..574b45292f0 100644 --- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp @@ -49,9 +49,9 @@ extern "C" reinterpret_cast(targets->buffer)); } - void quantum__trc__inject_global_barrier(char* name, int32_t duration) // NOLINT + void quantum__trc__inject_global_barrier(int32_t id, int32_t duration) // NOLINT { - (void)tracer->InjectGlobalBarrier(name, duration); + (void)tracer->InjectGlobalBarrier(id, duration); } RESULT* quantum__trc__single_qubit_measure(int32_t id, int32_t duration, QUBIT* q) // NOLINT diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 36ecd135622..35b077d0302 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -244,10 +244,10 @@ namespace Quantum return layerToInsertInto; } - LayerId CTracer::InjectGlobalBarrier(const char* name, Duration duration) + LayerId CTracer::InjectGlobalBarrier(OpId id, Duration duration) { LayerId layer = this->CreateNewLayer(duration); - this->metricsByLayer[layer].name = name; + this->metricsByLayer[layer].barrierId 
= id; this->globalBarrier = layer; return layer; } diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp index fa7d0e6e3c4..0271ad57c42 100644 --- a/src/QirRuntime/lib/Tracer/tracer.hpp +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -34,8 +34,8 @@ namespace Quantum // Quantum operations, assigned to this layer. std::unordered_map operations; - // Optional layer's name (global barriers might provide it). - std::string name; + // Optional id, if the layer represents a global barrier. + OpId barrierId = -1; Layer(Duration duration, Time startTime) : duration(duration) @@ -161,7 +161,7 @@ namespace Quantum // ------------------------------------------------------------------------------------------------------------- // Backing of the rest of the bridge methods. // ------------------------------------------------------------------------------------------------------------- - LayerId InjectGlobalBarrier(const char* name, Duration duration); + LayerId InjectGlobalBarrier(OpId id, Duration duration); // ------------------------------------------------------------------------------------------------------------- // Configuring the tracer and getting data back from it. diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt index 852dc3374b6..092ab2d250d 100644 --- a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt @@ -1,5 +1,5 @@ set(TEST_FILES - qir-test-intrinsics + tracer-intrinsics ) foreach(file ${TEST_FILES}) diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py new file mode 100644 index 00000000000..e1e5828a44f --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
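+#
+# Usage sketch (an assumption for illustration, mirroring the notes below: qsc
+# may be given as a bare name resolved via the PATH environment variable, or as
+# an absolute path):
+#
+#   python generate.py qsc.exe
+#
+# For every eligible *.qs file in this folder, the script emits the
+# corresponding QIR, e.g. tracer-intrinsics.qs -> tracer-intrinsics.ll.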
+ +import sys, os, platform, subprocess, datetime, shutil + +# ============================================================================= +# Generates QIR files for all *.qs files in this folder +# Accepts arguments: +# path to qsc.exe (absolute or rely on Path env) +# +# For example: "generate.py qsc.exe" +# ============================================================================= + +# ============================================================================= +def log(message): + now = datetime.datetime.now() + current_time = now.strftime("%H:%M:%S") + print(current_time + ": " + message) +# ============================================================================= + +root_dir = os.path.dirname(os.path.abspath(__file__)) + +# parameters +qsc = sys.argv[1] # argv[0] is the name of this script file + +for file in os.listdir(root_dir): + (file_name, ext) = os.path.splitext(file) + if ext == ".qs" and file_name != "tracer-core" and file_name != "tracer-target": + log("Generating QIR from " + file) + subprocess.run( + qsc + " build --qir s --build-exe --input " + file + + " tracer-core.qs tracer-target.qs --proj " + file_name, shell = True) diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll b/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll deleted file mode 100644 index b95a35bf1dc..00000000000 --- a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.ll +++ /dev/null @@ -1,198 +0,0 @@ - -%Result = type opaque -%Range = type { i64, i64, i64 } -%Qubit = type opaque -%Array = type opaque -%Tuple = type opaque - -@ResultZero = external global %Result* -@ResultOne = external global %Result* -@PauliI = constant i2 0 -@PauliX = constant i2 1 -@PauliY = constant i2 -1 -@PauliZ = constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } - -@Microsoft_Quantum_Testing_Tracer_AllIntrinsics = alias i1 (), i1 ()* @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body - -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - ret void -} - -declare void @__quantum__trc__single_qubit_op(i64, i64, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { -entry: -; call void @__quantum__rt__array_add_access(%Array* %ctls) - %0 = call i64 @__quantum__rt__array_get_length(%Array* %ctls, i32 0) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 -; call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} - -declare void @__quantum__rt__array_add_access(%Array*) - -declare i64 @__quantum__rt__array_get_length(%Array*, i32) - -declare void @__quantum__trc__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) - -declare void @__quantum__rt__array_remove_access(%Array*) - -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: -; call void @__quantum__rt__array_add_access(%Array* %ctls) - %0 = call i64 @__quantum__rt__array_get_length(%Array* %ctls, i32 0) - %1 = icmp eq i64 %0, 
1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 -; call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} - -define i1 @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body() #0 { -entry: - %res = alloca i1 - store i1 false, i1* %res - %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) -; call void @__quantum__rt__array_add_access(%Array* %qs) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %1 = bitcast i8* %0 to %Qubit** - %qb__inline__1 = load %Qubit*, %Qubit** %1 - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__1) - %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %3 = bitcast i8* %2 to %Qubit** - %qb__inline__2 = load %Qubit*, %Qubit** %3 - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__2) - %c = call %Qubit* @__quantum__rt__qubit_allocate() - %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) - %6 = bitcast i8* %5 to %Qubit** - store %Qubit* %c, %Qubit** %6 - %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %8 = bitcast i8* %7 to %Qubit** - %9 = load %Qubit*, %Qubit** %8 - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Array*, %Qubit* }* - %12 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 - %13 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 1 - store %Array* %4, %Array** %12 - call void @__quantum__rt__array_reference(%Array* %4) - store %Qubit* %9, %Qubit** %13 - %14 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 - %ctls__inline__3 = load %Array*, %Array** %14 -; call void @__quantum__rt__array_add_access(%Array* %ctls__inline__3) - %15 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 1 - %qb__inline__3 = load %Qubit*, %Qubit** %15 - %16 = call i64 @__quantum__rt__array_get_length(%Array* %ctls__inline__3, i32 0) - %17 = icmp eq i64 %16, 1 - br i1 %17, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__3, %Qubit* %qb__inline__3) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__3, %Qubit* %qb__inline__3) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 -; call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__3) - call void @__quantum__rt__array_unreference(%Array* %4) - %18 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %11, i64 0, i32 0 - %19 = load %Array*, %Array** %18 - call void @__quantum__rt__array_unreference(%Array* %19) - call void @__quantum__rt__tuple_unreference(%Tuple* %10) - call void @__quantum__rt__qubit_release(%Qubit* %c) - %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) -; call void @__quantum__rt__array_add_access(%Array* %cc) - %20 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %21 = bitcast i8* %20 to %Qubit** - %22 = load %Qubit*, %Qubit** %21 - %23 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %24 = bitcast %Tuple* %23 to { %Array*, %Qubit* }* - %25 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 0 - %26 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 1 - store %Array* %cc, %Array** %25 - call void @__quantum__rt__array_reference(%Array* %cc) - store %Qubit* %22, %Qubit** %26 - %27 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 0 - %ctls__inline__4 = load %Array*, %Array** %27 -; call void @__quantum__rt__array_add_access(%Array* %ctls__inline__4) - %28 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 1 - %qb__inline__4 = load %Qubit*, %Qubit** %28 - %29 = call i64 @__quantum__rt__array_get_length(%Array* %ctls__inline__4, i32 0) - %30 = icmp eq i64 %29, 1 - br i1 %30, label %then0__2, label %else__2 - -then0__2: ; preds = %continue__1 - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__4, %Qubit* %qb__inline__4) - br label %continue__2 - -else__2: ; preds = %continue__1 - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__4, %Qubit* %qb__inline__4) - br label %continue__2 - -continue__2: ; preds = %else__2, %then0__2 -; call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__4) - %31 = getelementptr { %Array*, %Qubit* }, { %Array*, %Qubit* }* %24, i64 0, i32 0 - %32 = load %Array*, %Array** %31 - call void @__quantum__rt__array_unreference(%Array* %32) - call void @__quantum__rt__tuple_unreference(%Tuple* %23) - call void @__quantum__rt__qubit_release_array(%Array* %cc) -; call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_unreference(%Array* %cc) - call void @__quantum__rt__qubit_release_array(%Array* %qs) -; call void @__quantum__rt__array_remove_access(%Array* %qs) - call void @__quantum__rt__array_unreference(%Array* %qs) - ret i1 true -} - -declare %Qubit* @__quantum__rt__qubit_allocate() - -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -declare void @__quantum__rt__array_reference(%Array*) - -declare void @__quantum__rt__array_unreference(%Array*) - -declare void @__quantum__rt__tuple_unreference(%Tuple*) - -declare void @__quantum__rt__qubit_release(%Qubit*) - -declare void @__quantum__rt__qubit_release_array(%Array*) - -attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index eb39f89542d..ae12e0aef4b 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -23,10 +23,12 @@ namespace Quantum } // namespace Microsoft extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT -TEST_CASE("Test that we are building the new components correctly", "[skip]") +TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]") { shared_ptr tr = CreateTracer(); SetSimulatorForQIR(tr.get()); REQUIRE(Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body()); + vector layers = 
tr->UseLayers(); + CHECK(layers.size() > 0); } diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs b/src/QirRuntime/test/QIR-tracer/tracer-core.qs similarity index 100% rename from src/QirRuntime/test/QIR-tracer/qir-tracer-core.qs rename to src/QirRuntime/test/QIR-tracer/tracer-core.qs diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll new file mode 100644 index 00000000000..31d533b0185 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll @@ -0,0 +1,848 @@ + +%Result = type opaque +%Range = type { i64, i64, i64 } +%Qubit = type opaque +%Array = type opaque + +@ResultZero = external global %Result* +@ResultOne = external global %Result* +@PauliI = constant i2 0 +@PauliX = constant i2 1 +@PauliY = constant i2 -1 +@PauliZ = constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } + +@Microsoft__Quantum__Testing__Tracer__AllIntrinsics = alias i1 (), i1 ()* @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body + +define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +declare void @__quantum__trc__single_qubit_op(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +declare void @__quantum__rt__array_add_access(%Array*) + +declare void @__quantum__trc__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + +declare void @__quantum__rt__array_remove_access(%Array*) + +define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, 
%Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void 
@__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__1) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__1) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void 
@__quantum__rt__array_unreference(%Array*) + +define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__1) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void 
@Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* 
%ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define i1 @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body() #0 { +entry: + %res = alloca i1 + store i1 false, i1* %res + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_add_access(%Array* %qs) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb__inline__1 = load %Qubit*, %Qubit** %1 + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qb__inline__2 = load %Qubit*, %Qubit** %3 + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb__inline__2) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qb__inline__3 = load %Qubit*, %Qubit** %5 + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb__inline__3) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %qb__inline__4 = load %Qubit*, %Qubit** %7 + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb__inline__4) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %qb__inline__5 = load %Qubit*, %Qubit** %15 + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb__inline__5) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %17 = bitcast i8* %16 to %Qubit** + %qb__inline__6 = load %Qubit*, %Qubit** %17 + call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb__inline__6) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %19 = bitcast i8* %18 to %Qubit** + %qb__inline__7 = load %Qubit*, %Qubit** %19 + call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb__inline__7) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %qb__inline__8 = load %Qubit*, %Qubit** %21 + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb__inline__8) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %23 = bitcast i8* %22 to %Qubit** + 
%qb__inline__10 = load %Qubit*, %Qubit** %23 + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb__inline__10) + call void @__quantum__trc__inject_global_barrier(i64 42, i64 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %25 = bitcast i8* %24 to %Qubit** + %qb__inline__12 = load %Qubit*, %Qubit** %25 + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__12) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %qb__inline__13 = load %Qubit*, %Qubit** %27 + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb__inline__13) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %qb__inline__14 = load %Qubit*, %Qubit** %29 + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb__inline__14) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %31 = bitcast i8* %30 to %Qubit** + %qb__inline__15 = load %Qubit*, %Qubit** %31 + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb__inline__15) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %39 = bitcast i8* %38 to %Qubit** + %qb__inline__16 = load %Qubit*, %Qubit** %39 + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb__inline__16) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %41 = bitcast i8* %40 to %Qubit** + %qb__inline__17 = load %Qubit*, %Qubit** %41 + call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb__inline__17) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %43 = bitcast i8* %42 to %Qubit** + %qb__inline__18 = load %Qubit*, %Qubit** %43 + call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb__inline__18) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %45 = bitcast i8* %44 to %Qubit** + %qb__inline__19 = load %Qubit*, %Qubit** %45 + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb__inline__19) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %47 = bitcast i8* %46 to %Qubit** + %qb__inline__21 = load %Qubit*, %Qubit** %47 + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb__inline__21) + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %ctls__inline__23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__23, i64 0) + %49 = bitcast i8* %48 to %Qubit** + store %Qubit* %c, %Qubit** %49 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__23) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %51 = bitcast i8* %50 to %Qubit** + %qb__inline__23 = load %Qubit*, %Qubit** %51 + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__23, %Qubit* %qb__inline__23) + br label %continue__1 + +else__1: ; preds = 
%entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__23, %Qubit* %qb__inline__23) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__23) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__23) + %ctls__inline__24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__24, i64 0) + %53 = bitcast i8* %52 to %Qubit** + store %Qubit* %c, %Qubit** %53 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__24) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %55 = bitcast i8* %54 to %Qubit** + %qb__inline__24 = load %Qubit*, %Qubit** %55 + br i1 true, label %then0__2, label %else__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__inline__24, %Qubit* %qb__inline__24) + br label %continue__2 + +else__2: ; preds = %continue__1 + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__inline__24, %Qubit* %qb__inline__24) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__24) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__24) + %ctls__inline__25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__25, i64 0) + %57 = bitcast i8* %56 to %Qubit** + store %Qubit* %c, %Qubit** %57 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__25) + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %59 = bitcast i8* %58 to %Qubit** + %qb__inline__25 = load %Qubit*, %Qubit** %59 + br i1 true, label %then0__3, label %else__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__inline__25, %Qubit* %qb__inline__25) + br label %continue__3 + +else__3: ; preds = %continue__2 + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__inline__25, %Qubit* %qb__inline__25) + br label %continue__3 + +continue__3: ; preds = %else__3, %then0__3 + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__25) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__25) + %ctls__inline__26 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__26, i64 0) + %61 = bitcast i8* %60 to %Qubit** + store %Qubit* %c, %Qubit** %61 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__26) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %63 = bitcast i8* %62 to %Qubit** + %qb__inline__26 = load %Qubit*, %Qubit** %63 + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__inline__26, %Qubit* %qb__inline__26) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__26) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__26) + %ctls__inline__27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__27, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %c, %Qubit** %65 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__27) + %66 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %67 = bitcast i8* %66 to %Qubit** + %qb__inline__27 = load %Qubit*, %Qubit** %67 + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__inline__27, %Qubit* %qb__inline__27) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__27) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__27) + %ctls__inline__28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__28, i64 0) + %69 = bitcast i8* %68 to %Qubit** + store %Qubit* %c, %Qubit** %69 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__28) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %71 = bitcast i8* %70 to %Qubit** + %qb__inline__28 = load %Qubit*, %Qubit** %71 + call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__inline__28, %Qubit* %qb__inline__28) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__28) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__28) + %ctls__inline__29 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__29, i64 0) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %c, %Qubit** %73 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__29) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %75 = bitcast i8* %74 to %Qubit** + %qb__inline__29 = load %Qubit*, %Qubit** %75 + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__inline__29, %Qubit* %qb__inline__29) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__29) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__29) + %ctls__inline__30 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__30, i64 0) + %77 = bitcast i8* %76 to %Qubit** + store %Qubit* %c, %Qubit** %77 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__30) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %79 = bitcast i8* %78 to %Qubit** + %qb__inline__30 = load %Qubit*, %Qubit** %79 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__30) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__inline__30, %Qubit* %qb__inline__30) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__30) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__30) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__30) + %ctls__inline__32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__32, i64 0) + %81 = bitcast i8* %80 to %Qubit** + store %Qubit* %c, %Qubit** %81 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__32) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %83 = bitcast i8* %82 to %Qubit** + %qb__inline__32 = load %Qubit*, %Qubit** %83 + call void @__quantum__rt__array_add_access(%Array* %ctls__inline__32) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__inline__32, %Qubit* %qb__inline__32) + call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__32) + call void 
@__quantum__rt__array_remove_access(%Array* %ctls__inline__32) + call void @__quantum__rt__array_unreference(%Array* %ctls__inline__32) + call void @__quantum__rt__qubit_release(%Qubit* %c) + %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %85 = bitcast i8* %84 to %Qubit** + %qb__inline__34 = load %Qubit*, %Qubit** %85 + %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) + %87 = icmp eq i64 %86, 1 + br i1 %87, label %then0__4, label %else__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__inline__34) + br label %continue__4 + +else__4: ; preds = %continue__3 + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__inline__34) + br label %continue__4 + +continue__4: ; preds = %else__4, %then0__4 + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %89 = bitcast i8* %88 to %Qubit** + %qb__inline__35 = load %Qubit*, %Qubit** %89 + %90 = icmp eq i64 %86, 1 + br i1 %90, label %then0__5, label %else__5 + +then0__5: ; preds = %continue__4 + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__inline__35) + br label %continue__5 + +else__5: ; preds = %continue__4 + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__inline__35) + br label %continue__5 + +continue__5: ; preds = %else__5, %then0__5 + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %92 = bitcast i8* %91 to %Qubit** + %qb__inline__36 = load %Qubit*, %Qubit** %92 + %93 = icmp eq i64 %86, 1 + br i1 %93, label %then0__6, label %else__6 + +then0__6: ; preds = %continue__5 + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__inline__36) + br label %continue__6 + +else__6: ; preds = %continue__5 + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__inline__36) + br label %continue__6 + +continue__6: ; preds = %else__6, %then0__6 + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %95 = bitcast i8* %94 to %Qubit** + %qb__inline__37 = load %Qubit*, %Qubit** %95 + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__inline__37) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %97 = bitcast i8* %96 to %Qubit** + %qb__inline__38 = load %Qubit*, %Qubit** %97 + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__inline__38) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %99 = bitcast i8* %98 to %Qubit** + %qb__inline__39 = load %Qubit*, %Qubit** %99 + call void 
@__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__inline__39) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %101 = bitcast i8* %100 to %Qubit** + %qb__inline__40 = load %Qubit*, %Qubit** %101 + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__inline__40) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %103 = bitcast i8* %102 to %Qubit** + %qb__inline__41 = load %Qubit*, %Qubit** %103 + call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__inline__41) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_add_access(%Array* %cc) + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %105 = bitcast i8* %104 to %Qubit** + %qb__inline__43 = load %Qubit*, %Qubit** %105 + call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__inline__43) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__qubit_release_array(%Array* %cc) + call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_unreference(%Array* %cc) + %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %107 = bitcast i8* %106 to %Qubit** + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %109 = bitcast i8* %108 to %Qubit** + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %111 = bitcast i8* %110 to %Qubit** + %112 = load %Qubit*, %Qubit** %111 + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %114 = bitcast i8* %113 to %Qubit** + %115 = load %Qubit*, %Qubit** %114 + store %Qubit* %112, %Qubit** %107 + store %Qubit* %115, %Qubit** %109 + call void @__quantum__rt__array_add_access(%Array* %qs12) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_remove_access(%Array* %qs) + call void @__quantum__rt__array_remove_access(%Array* %qs12) + call void @__quantum__rt__array_unreference(%Array* %qs) + call void @__quantum__rt__array_unreference(%Array* %qs12) + ret i1 true +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__trc__inject_global_barrier(i64, i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +attributes #0 = { "EntryPoint" } diff --git a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs similarity index 84% rename from src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs rename to src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs index c05344c80f5..2772d93b401 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-test-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs 
@@ -5,6 +5,14 @@ namespace Microsoft.Quantum.Testing.Tracer { open Microsoft.Quantum.Intrinsic; + operation Fixup(qs : Qubit[]) : Unit + { + for (i in 0..Length(qs)-1) + { + X(qs[i]); + } + } + @EntryPoint() operation AllIntrinsics() : Bool { @@ -23,7 +31,7 @@ namespace Microsoft.Quantum.Testing.Tracer S(qs[1]); T(qs[2]); - Barrier("foo", 1); + Barrier(42, 1); Adjoint X(qs[0]); Adjoint Y(qs[0]); @@ -65,7 +73,12 @@ namespace Microsoft.Quantum.Testing.Tracer Controlled T(cc, (qs[2])); } - //set res = (M(qs[0]) == Measure([PauliY, PauliX], [qs[1], qs[2]])); + //let r0 = M(qs[0]) + //ApplyIfZero(r0, (Z, qs[0])); + + let qs12 = [qs[1], qs[2]]; + //let r12 = Measure([PauliY, PauliX], qs12); + //ApplyIfOne(r12, (Fixup, qs12)); } return true; } diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs similarity index 98% rename from src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs rename to src/QirRuntime/test/QIR-tracer/tracer-target.qs index 634c7642f2c..f82d3887599 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -31,6 +31,7 @@ namespace Microsoft.Quantum.Intrinsic { open Microsoft.Quantum.Core; open Microsoft.Quantum.Instructions as Phys; + open Microsoft.Quantum.Targeting; @Inline() operation X(qb : Qubit) : Unit @@ -198,7 +199,7 @@ namespace Microsoft.Quantum.Intrinsic { } @TargetInstruction("inject_global_barrier") - operation Barrier(name : String, duration : Int) : Unit + operation Barrier(id : Int, duration : Int) : Unit { body intrinsic; } diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index dbaed14852b..fa77aee15a0 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -153,7 +153,7 @@ TEST_CASE("Global barrier", "[tracer]") Qubit q3 = tr->AllocateQubit(); CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,1) created - CHECK(1 == tr->InjectGlobalBarrier("foo", 1)); // creates L(1,1) + CHECK(1 == tr->InjectGlobalBarrier(42, 1)); // creates L(1,1) CHECK(2 == tr->TraceMultiQubitOp(2, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); // the barrier shouldn't allow this op to fall through into L(0,1), so should create L(2,1) @@ -173,7 +173,7 @@ TEST_CASE("Global barrier", "[tracer]") const auto& ops0 = layers[0].operations; CHECK(ops0.find(1) != ops0.end()); - CHECK(std::string("foo") == layers[1].name); + CHECK(42 == layers[1].barrierId); const auto& ops2 = layers[2].operations; CHECK(ops2.find(2) != ops2.end()); From e3d47a8ed3dc754447237e0a04bbe48f00242676 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Tue, 19 Jan 2021 15:54:43 -0800 Subject: [PATCH 12/27] Qir test now includes measurements --- src/QirRuntime/lib/Tracer/bridge-trc.ll | 11 +- src/QirRuntime/lib/Tracer/tracer-trc.cpp | 5 + .../test/QIR-tracer/tracer-intrinsics.ll | 651 ++++++++++++++---- .../test/QIR-tracer/tracer-intrinsics.qs | 4 +- .../test/QIR-tracer/tracer-target.qs | 98 ++- 5 files changed, 632 insertions(+), 137 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/bridge-trc.ll index 081f9d03372..43a40dbe934 100644 --- a/src/QirRuntime/lib/Tracer/bridge-trc.ll +++ b/src/QirRuntime/lib/Tracer/bridge-trc.ll @@ -26,7 +26,8 @@ declare void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct. 
 declare void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*)
 declare void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*)
 declare void @quantum__trc__inject_global_barrier(i32 %id, i32 %duration)
-declare %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %.q)
+declare %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT*)
+declare %class.RESULT* @quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %struct.QirArray*)
 
 ;===============================================================================
 ; quantum__trc namespace implementations
@@ -73,4 +74,12 @@ define %Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Q
   %r = call %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %q)
   %.r = bitcast %class.RESULT* %r to %Result*
   ret %Result* %.r
+}
+
+define %Result* @__quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %Array* %.qs)
+{
+  %qs = bitcast %Array* %.qs to %struct.QirArray*
+  %r = call %class.RESULT* @quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %struct.QirArray* %qs)
+  %.r = bitcast %class.RESULT* %r to %Result*
+  ret %Result* %.r
 }
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
index 574b45292f0..fb1c9bdf03a 100644
--- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer-trc.cpp
@@ -58,4 +58,9 @@ extern "C"
     {
         return tracer->TraceSingleQubitMeasurement(id, duration, q);
     }
+
+    RESULT* quantum__trc__multi_qubit_measure(int32_t id, int32_t duration, QirArray* qs) // NOLINT
+    {
+        return tracer->TraceMultiQubitMeasurement(id, duration, qs->count, reinterpret_cast<Qubit*>(qs->buffer));
+    }
 }
\ No newline at end of file
diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll
index 31d533b0185..94daa07ed27 100644
--- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll
+++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll
@@ -14,24 +14,24 @@
 
 @Microsoft__Quantum__Testing__Tracer__AllIntrinsics = alias i1 (), i1 ()* @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body
 
-define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) {
+define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) {
 entry:
-  call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb)
+  call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb)
   ret void
 }
 
 declare void @__quantum__trc__single_qubit_op(i64, i64, %Qubit*)
 
-define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) {
+define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) {
 entry:
-  call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb)
+  call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb)
   ret void
 }
 
-define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) {
+define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) {
 entry:
   call void @__quantum__rt__array_add_access(%Array* %ctls)
-  call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb)
+  call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb)
   call void @__quantum__rt__array_remove_access(%Array* %ctls)
   ret void
 }
@@ -42,63 +42,293 @@
declare void @__quantum__rt__array_remove_access(%Array*) -define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { entry: - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + call void @__quantum__rt__array_add_access(%Array* %paulis) + call void @__quantum__rt__array_add_access(%Array* %qubits) + %0 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %0, %Result** %res + %haveY = alloca i1 + store i1 false, i1* %haveY + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %3 = icmp sge i64 %i, %2 + %4 = icmp sle i64 %i, %2 + %5 = select i1 true, i1 %4, i1 %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %7 = bitcast i8* %6 to i2* + %8 = load i2, i2* %7 + %9 = load i2, i2* @PauliY + %10 = icmp eq i2 %8, %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %12 = bitcast i8* %11 to i2* + %13 = load i2, i2* %12 + %14 = load i2, i2* @PauliI + %15 = icmp eq i2 %13, %14 + %16 = or i1 %10, %15 + br i1 %16, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %17 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = load i1, i1* %haveY + br i1 %18, label %then0__2, label %test1__1 + +then0__2: ; 
preds = %exit__1 + %19 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qubits) + store %Result* %19, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %19) + br label %continue__2 + +test1__1: ; preds = %exit__1 + %20 = icmp sgt i64 %1, 2 + br i1 %20, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %21 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qubits) + store %Result* %21, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %21) + br label %continue__2 + +test2__1: ; preds = %test1__1 + %22 = icmp eq i64 %1, 1 + br i1 %22, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %24 = bitcast i8* %23 to i2* + %25 = load i2, i2* %24 + %26 = load i2, i2* @PauliX + %27 = icmp eq i2 %25, %26 + br i1 %27, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %29 = bitcast i8* %28 to %Qubit** + %qb__inline__1 = load %Qubit*, %Qubit** %29 + %30 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__1) + store %Result* %30, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %30) + br label %continue__3 + +else__1: ; preds = %then2__1 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %32 = bitcast i8* %31 to %Qubit** + %qb__inline__2 = load %Qubit*, %Qubit** %32 + %33 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__2) + store %Result* %33, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %33) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label %continue__2 + +test3__1: ; preds = %test2__1 + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %35 = bitcast i8* %34 to i2* + %36 = load i2, i2* %35 + %37 = load i2, i2* @PauliX + %38 = icmp eq i2 %36, %37 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %40 = bitcast i8* %39 to i2* + %41 = load i2, i2* %40 + %42 = load i2, i2* @PauliX + %43 = icmp eq i2 %41, %42 + %44 = and i1 %38, %43 + br i1 %44, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + %45 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qubits) + store %Result* %45, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %45) + br label %continue__2 + +test4__1: ; preds = %test3__1 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %47 = bitcast i8* %46 to i2* + %48 = load i2, i2* %47 + %49 = load i2, i2* @PauliX + %50 = icmp eq i2 %48, %49 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %52 = bitcast i8* %51 to i2* + %53 = load i2, i2* %52 + %54 = load i2, i2* @PauliZ + %55 = icmp eq i2 %53, %54 + %56 = and i1 %50, %55 + br i1 %56, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + %57 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qubits) + store %Result* %57, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %57) + br label %continue__2 + +test5__1: ; preds = %test4__1 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %59 = bitcast i8* %58 to i2* + %60 = load i2, i2* %59 + %61 = load i2, i2* @PauliZ + %62 
= icmp eq i2 %60, %61 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = load i2, i2* %64 + %66 = load i2, i2* @PauliX + %67 = icmp eq i2 %65, %66 + %68 = and i1 %62, %67 + br i1 %68, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + %69 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qubits) + store %Result* %69, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %69) + br label %continue__2 + +test6__1: ; preds = %test5__1 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = load i2, i2* %71 + %73 = load i2, i2* @PauliZ + %74 = icmp eq i2 %72, %73 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %76 = bitcast i8* %75 to i2* + %77 = load i2, i2* %76 + %78 = load i2, i2* @PauliZ + %79 = icmp eq i2 %77, %78 + %80 = and i1 %74, %79 + br i1 %80, label %then6__1, label %continue__2 + +then6__1: ; preds = %test6__1 + %81 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qubits) + store %Result* %81, %Result** %res + call void @__quantum__rt__result_unreference(%Result* %81) + br label %continue__2 + +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %82 = load %Result*, %Result** %res + call void @__quantum__rt__result_reference(%Result* %82) + call void @__quantum__rt__array_remove_access(%Array* %paulis) + call void @__quantum__rt__array_remove_access(%Array* %qubits) + ret %Result* %82 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare %Result* @__quantum__trc__multi_qubit_measure(i64, i64, %Array*) + +declare void @__quantum__rt__result_unreference(%Result*) + +declare %Result* @__quantum__trc__single_qubit_measure(i64, i64, %Qubit*) + +declare void @__quantum__rt__result_reference(%Result*) + +define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 %theta = load double, double* %1 %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 %theta = load double, double* %1 %2 = getelementptr { double, %Qubit* }, { double, 
%Qubit* }* %0, i64 0, i32 1 %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) @@ -106,11 +336,11 @@ entry: br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 @@ -118,9 +348,7 @@ continue__1: ; preds = %else__1, %then0__1 ret void } -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) @@ -128,11 +356,11 @@ entry: br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 @@ -192,64 +420,38 @@ continue__1: ; preds = %else__1, %then0__1 ret void } -define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { entry: call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { entry: call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) call void 
@__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void + %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 } define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { @@ -277,8 +479,6 @@ continue__1: ; preds = %else__1, %then0__1 declare %Array* @__quantum__rt__array_create_1d(i32, i64) -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) - declare void @__quantum__rt__array_unreference(%Array*) define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { @@ -304,66 +504,104 @@ continue__1: ; preds = %else__1, %then0__1 ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_remove_access(%Array* %ctls) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) 
{ entry: call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { entry: call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { entry: + call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__rt__array_add_access(%Array* %ctls) call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb) + %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 %theta = load double, double* %1 %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } -define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 %theta = load double, double* %1 %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void 
@__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_remove_access(%Array* %ctls) ret void } @@ -404,51 +642,25 @@ entry: ret void } -define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void + %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 } -define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) @@ -456,11 +668,11 @@ entry: br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 @@ -468,7 +680,7 @@ continue__1: ; preds = %else__1, %then0__1 ret void } -define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_add_access(%Array* %ctls) %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) @@ -476,11 +688,11 @@ entry: br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void 
@__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 @@ -813,25 +1025,218 @@ continue__6: ; preds = %else__6, %then0__6 call void @__quantum__rt__qubit_release_array(%Array* %cc) call void @__quantum__rt__array_remove_access(%Array* %cc) call void @__quantum__rt__array_unreference(%Array* %cc) - %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %107 = bitcast i8* %106 to %Qubit** - %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %qb__inline__45 = load %Qubit*, %Qubit** %107 + %r0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__45) + %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) %109 = bitcast i8* %108 to %Qubit** - %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) %111 = bitcast i8* %110 to %Qubit** - %112 = load %Qubit*, %Qubit** %111 - %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %114 = bitcast i8* %113 to %Qubit** - %115 = load %Qubit*, %Qubit** %114 - store %Qubit* %112, %Qubit** %107 - store %Qubit* %115, %Qubit** %109 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %113 = bitcast i8* %112 to %Qubit** + %114 = load %Qubit*, %Qubit** %113 + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %116 = bitcast i8* %115 to %Qubit** + %117 = load %Qubit*, %Qubit** %116 + store %Qubit* %114, %Qubit** %109 + store %Qubit* %117, %Qubit** %111 call void @__quantum__rt__array_add_access(%Array* %qs12) + %paulis__inline__47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %119 = bitcast i8* %118 to i2* + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %121 = bitcast i8* %120 to i2* + %122 = load i2, i2* @PauliY + %123 = load i2, i2* @PauliX + store i2 %122, i2* %119 + store i2 %123, i2* %121 + call void @__quantum__rt__array_add_access(%Array* %paulis__inline__47) + call void @__quantum__rt__array_add_access(%Array* %qs12) + %124 = load %Result*, %Result** @ResultOne + %res__inline__47 = alloca %Result* + store %Result* %124, %Result** %res__inline__47 + %haveY__inline__47 = alloca i1 + store i1 false, i1* %haveY__inline__47 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__6 + %i__inline__47 = phi i64 [ 0, %continue__6 ], [ %139, %exiting__1 ] + %125 = icmp sge i64 %i__inline__47, 1 + %126 = icmp sle i64 %i__inline__47, 1 + %127 = select i1 true, i1 %126, i1 %125 + br i1 %127, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %128 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) + %129 = bitcast i8* %128 to 
i2* + %130 = load i2, i2* %129 + %131 = load i2, i2* @PauliY + %132 = icmp eq i2 %130, %131 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) + %134 = bitcast i8* %133 to i2* + %135 = load i2, i2* %134 + %136 = load i2, i2* @PauliI + %137 = icmp eq i2 %135, %136 + %138 = or i1 %132, %137 + br i1 %138, label %then0__7, label %continue__7 + +then0__7: ; preds = %body__1 + store i1 true, i1* %haveY__inline__47 + br label %continue__7 + +continue__7: ; preds = %then0__7, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__7 + %139 = add i64 %i__inline__47, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %140 = load i1, i1* %haveY__inline__47 + br i1 %140, label %then0__8, label %test1__1 + +then0__8: ; preds = %exit__1 + %141 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qs12) + store %Result* %141, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %141) + br label %continue__8 + +test1__1: ; preds = %exit__1 + br i1 false, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %142 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qs12) + store %Result* %142, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %142) + br label %continue__8 + +test2__1: ; preds = %test1__1 + br i1 false, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %143 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %144 = bitcast i8* %143 to i2* + %145 = load i2, i2* %144 + %146 = load i2, i2* @PauliX + %147 = icmp eq i2 %145, %146 + br i1 %147, label %then0__9, label %else__7 + +then0__9: ; preds = %then2__1 + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %149 = bitcast i8* %148 to %Qubit** + %qb__inline__48 = load %Qubit*, %Qubit** %149 + %150 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__48) + store %Result* %150, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %150) + br label %continue__9 + +else__7: ; preds = %then2__1 + %151 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %152 = bitcast i8* %151 to %Qubit** + %qb__inline__49 = load %Qubit*, %Qubit** %152 + %153 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__49) + store %Result* %153, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %153) + br label %continue__9 + +continue__9: ; preds = %else__7, %then0__9 + br label %continue__8 + +test3__1: ; preds = %test2__1 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %155 = bitcast i8* %154 to i2* + %156 = load i2, i2* %155 + %157 = load i2, i2* @PauliX + %158 = icmp eq i2 %156, %157 + %159 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %160 = bitcast i8* %159 to i2* + %161 = load i2, i2* %160 + %162 = load i2, i2* @PauliX + %163 = icmp eq i2 %161, %162 + %164 = and i1 %158, %163 + br i1 %164, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + %165 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qs12) + store %Result* %165, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %165) + br label %continue__8 + +test4__1: ; preds = %test3__1 + %166 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %167 = bitcast i8* %166 to i2* + %168 = load i2, i2* %167 + %169 = load i2, i2* @PauliX + %170 = icmp eq i2 %168, %169 + %171 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %172 = bitcast i8* %171 to i2* + %173 = load i2, i2* %172 + %174 = load i2, i2* @PauliZ + %175 = icmp eq i2 %173, %174 + %176 = and i1 %170, %175 + br i1 %176, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + %177 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qs12) + store %Result* %177, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %177) + br label %continue__8 + +test5__1: ; preds = %test4__1 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %179 = bitcast i8* %178 to i2* + %180 = load i2, i2* %179 + %181 = load i2, i2* @PauliZ + %182 = icmp eq i2 %180, %181 + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %184 = bitcast i8* %183 to i2* + %185 = load i2, i2* %184 + %186 = load i2, i2* @PauliX + %187 = icmp eq i2 %185, %186 + %188 = and i1 %182, %187 + br i1 %188, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + %189 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qs12) + store %Result* %189, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %189) + br label %continue__8 + +test6__1: ; preds = %test5__1 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %191 = bitcast i8* %190 to i2* + %192 = load i2, i2* %191 + %193 = load i2, i2* @PauliZ + %194 = icmp eq i2 %192, %193 + %195 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %196 = bitcast i8* %195 to i2* + %197 = load i2, i2* %196 + %198 = load i2, i2* @PauliZ + %199 = icmp eq i2 %197, %198 + %200 = and i1 %194, %199 + br i1 %200, label %then6__1, label %continue__8 + +then6__1: ; preds = %test6__1 + %201 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qs12) + store %Result* %201, %Result** %res__inline__47 + call void @__quantum__rt__result_unreference(%Result* %201) + br label %continue__8 + +continue__8: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__9, %then1__1, %then0__8 + %r12 = load %Result*, %Result** %res__inline__47 + call void @__quantum__rt__result_reference(%Result* %r12) + call void @__quantum__rt__array_remove_access(%Array* %paulis__inline__47) + call void @__quantum__rt__array_remove_access(%Array* %qs12) + call void @__quantum__rt__array_unreference(%Array* %paulis__inline__47) call void @__quantum__rt__qubit_release_array(%Array* %qs) call void @__quantum__rt__array_remove_access(%Array* %qs) call void @__quantum__rt__array_remove_access(%Array* %qs12) call void @__quantum__rt__array_unreference(%Array* %qs) + call void @__quantum__rt__result_unreference(%Result* %r0) call void @__quantum__rt__array_unreference(%Array* %qs12) + call void @__quantum__rt__result_unreference(%Result* %r12) ret i1 true } diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs index 2772d93b401..9764f154745 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -73,11 +73,11 @@ namespace 
Microsoft.Quantum.Testing.Tracer Controlled T(cc, (qs[2])); } - //let r0 = M(qs[0]) + let r0 = M(qs[0]); //ApplyIfZero(r0, (Z, qs[0])); let qs12 = [qs[1], qs[2]]; - //let r12 = Measure([PauliY, PauliX], qs12); + let r12 = Measure([PauliY, PauliX], qs12); //ApplyIfOne(r12, (Fixup, qs12)); } return true; diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index f82d3887599..c9854547b14 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -6,22 +6,32 @@ namespace Microsoft.Quantum.Instructions { open Microsoft.Quantum.Targeting; @TargetInstruction("single_qubit_op") - operation single_qubit_op (op_id: Int, duration: Int, qb : Qubit) : Unit { + operation single_qubit_op(op_id: Int, duration: Int, qb : Qubit) : Unit { body intrinsic; } @TargetInstruction("multi_qubit_op") - operation multi_qubit_op (op_id: Int, duration: Int, qbs : Qubit[]) : Unit { + operation multi_qubit_op(op_id: Int, duration: Int, qbs : Qubit[]) : Unit { body intrinsic; } @TargetInstruction("single_qubit_op_ctl") - operation single_qubit_op_ctl (op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit { + operation single_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit { body intrinsic; } @TargetInstruction("multi_qubit_op_ctl") - operation multi_qubit_op_ctl (op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { + operation multi_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { + body intrinsic; + } + + @TargetInstruction("single_qubit_measure") + operation single_qubit_measure(op_id: Int, duration: Int, qb : Qubit) : Result { + body intrinsic; + } + + @TargetInstruction("multi_qubit_measure") + operation multi_qubit_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { body intrinsic; } } @@ -198,6 +208,79 @@ namespace Microsoft.Quantum.Intrinsic { controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } } + @Inline() + operation Mz(qb : Qubit) : Result { + body (...) { return Phys.single_qubit_measure(100, 1, qb); } + } + + @Inline() + operation Mx(qb : Qubit) : Result { + body (...) { return Phys.single_qubit_measure(101, 1, qb); } + } + + @Inline() + operation M(qb : Qubit) : Result { + body (...) { return Mz(qb); } + } + + @Inline() + operation Mzz(qubits : Qubit[]) : Result { + body (...) { return Phys.multi_qubit_measure(102, 1, qubits); } + } + + @Inline() + operation Mxz(qubits : Qubit[]) : Result { + body (...) { return Phys.multi_qubit_measure(103, 1, qubits); } + } + + @Inline() + operation Mzx(qubits : Qubit[]) : Result { + body (...) { return Phys.multi_qubit_measure(104, 1, qubits); } + } + + @Inline() + operation Mxx(qubits : Qubit[]) : Result { + body (...) { return Phys.multi_qubit_measure(105, 1, qubits); } + } + + @Inline() + operation Measure(paulis : Pauli[], qubits : Qubit[]) : Result { + body (...) 
+ { + mutable res = One; + mutable haveY = false; + // Measurements that involve PauliY or PauliI + for (i in 0..Length(paulis)-1) + { + if (paulis[i] == PauliY or paulis[i] == PauliI) + { + set haveY = true; + } + } + if (haveY) { set res = Phys.multi_qubit_measure(106, 1, qubits); } + + // More than two qubits (but no PauliY or PauliI) + elif (Length(paulis) > 2) { set res = Phys.multi_qubit_measure(107, 1, qubits); } + + // Single qubit measurement -- differentiate between Mx and Mz + elif (Length(paulis) == 1) + { + if (paulis[0] == PauliX) { set res = Mx(qubits[0]); } + else { set res = Mz(qubits[0]); } + } + + // Specialize for two-qubit measurements: Mxx, Mxz, Mzx, Mzz + elif (paulis[0] == PauliX and paulis[1] == PauliX) { set res = Phys.multi_qubit_measure(108, 1, qubits); } + elif (paulis[0] == PauliX and paulis[1] == PauliZ) { set res = Phys.multi_qubit_measure(109, 1, qubits); } + elif (paulis[0] == PauliZ and paulis[1] == PauliX) { set res = Phys.multi_qubit_measure(110, 1, qubits); } + elif (paulis[0] == PauliZ and paulis[1] == PauliZ) { set res = Phys.multi_qubit_measure(111, 1, qubits); } + + //shouldn't get here + return res; + } + } + + @TargetInstruction("inject_global_barrier") operation Barrier(id : Int, duration : Int) : Unit { @@ -210,12 +293,5 @@ namespace Microsoft.Quantum.Intrinsic { adjoint self; } - operation M(qb : Qubit) : Result { - body intrinsic; - } - - operation Measure(bases : Pauli[], qubits : Qubit[]) : Result { - body intrinsic; - } } From 1d9a7d79712d7c1998daa6ab603971a290781cc0 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Tue, 19 Jan 2021 17:50:43 -0800 Subject: [PATCH 13/27] The spec of the tracer --- src/QirRuntime/lib/Tracer/README.md | 171 +++++++++++++++++++++++++++- 1 file changed, 169 insertions(+), 2 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 55a15f58e56..16bdc4c31cf 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -1,5 +1,172 @@ # Resource Tracer Design Document # -The purpose of the Resource Tracer is to provide efficient and flexible way to estimate resources of a quantum program in QIR representation. +The purpose of the Resource Tracer is to provide efficient and flexible way to estimate resources of a quantum program + in QIR representation. The estimates are calculated by simulating execution of the program (as opposed to the static + analysis). -Details are TBD. +In addition to the standard QIR runtime functions, the quantum program will have to: + +1. convert _all_ used intrinsic operations into one of the supported by the tracer _trc_ operations (see the list below); +1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates + would cover only one branch of the execution); +1. (_optional_) provide a C++ header file with names of the gates (for user friendly output); +1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated + as inlined as if the whole program consisted of a single operation); +1. (_optional_) provide callbacks for global barriers; +1. (_optional_) provide description of mapping for frame tracking. + +The Resource Tracer will consist of: + +1. the bridge for the `__quantum__trc__*` extension methods; +2. the native implementation to back the extensions; +3. the logic for partitioning gates into layers; +4. the logic for frame tracking; +5. output of the collected statistics; +6. 
(_lower priority_) the scheduling component to optimize depth and/or width of the circuit. + +## List of `__quantum__trc__*` methods ## + +___WIP___ + +| Signature | Description | +| :---------------------------------------------------- | :----------------------------------------------------------- | +| `void __quantum__trc__inject_global_barrier(i32 %id, i32 %duration)` | Function to insert a global barrier. It will be inserted into QIR based on a user defined intrinsic. See [Layering](#layering) section for details. | +| `void __quantum__trc__on_operation_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) and option to cache estimates for a module, executed multiple times. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. | +| `void __quantum__trc__on_operation_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. | +| `void __quantum__trc__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation, as assigned by the client. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is duration to be assigned to the particular invocation of the operation. | +| `void __quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits.| +| `void __quantum__trc__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit. | +| `void __quantum__trc__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. | +| `%Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a sigle qubit. The user might assign different operation id, depending on the basis of the measurement. | +| `%Result* @__quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user might assign different operation id, depending on the basis of the measurement for each qubit.| +| `void __quantum__trc__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. | + +_Note on operation ids_: The client is responsible for using opeartion ids in a consistent manner. Operations with the + same id will be counted by the tracer as the _same_ operation, even accross invocations with different number of target + qubits or when different functors are applied. + +## Native backing of the extension methods ## + +The Resource Tracer will reuse qir-rt library as much as possible while extending it with the entry points specified above. + +__Conditionals on measurements__: The Resource Tracer will execute LLVM IR's branching structures "as is", depending on + the values of the corresponding variables at runtime. 
To enable estimation of branches that depend on a measurement
+ result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the
+ conditionals into `__quantum__trc__apply_if*` calls. The tracer will add operations from _both branches_ into the
+ layers it creates to compute the upper bound estimate.
+
+Nested conditionals, conditional measurements, and conditional tracked operations will _not_ be supported.
+
+__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated
+ computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules
+ are treated as layering barriers.
+
+## Layering ##
+
+_Definition_: ___Time___ is an integer-valued function on all quantum operations in a program (gates, measurements,
+ qubits allocation/release). For each gate there is start and end times. For each qubit, there are times when the qubit
+ is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits the gate uses.
+ If two gates or measurements use the same qubit, one of the gates must have start time greater of equal than the end
+ time of the other.
+
+A sequentially executed quantum program can be assigned a trivial time function, in which all quantum operations have
+ duration of 1 and unique start times, ordered to match the flow of the program. Layering compresses the timeline by
+ assuming that some operations might be executed simultaneously while allowing for different operations to have various
+ durations.
+
+_Definition_: Provided a valid _time_ function for the program, a ___layer of duration N at time T, denoted as L(T,N),___
+ is a subset of operations in the program such that all of these operations have start time greater than or equal to _T_
+ and finish time less than _T + N_. The program is ___layered___ if all gates in it are partitioned into layers that don't
+ overlap in time. The union of all qubits that are involved in operations of a given layer will be denoted _Qubits(T,N)_.
+
+A sequential program can be trivially layered such that each layer contains exactly one operation. Notice that the
+ definition of a layer doesn't require the gates to be executed _in parallel_. For example, all gates in a fully sequential
+ program can also be placed into a single layer L(0, infinity). Some gates might be considered to be very cheap and take
+ zero time to execute; those gates can be added to a layer even if they act on the same qubit another gate in this layer
+ is acting on and have to be executed sequentially within the layer.
+
+_Definition_: A ___global barrier___ is any operation that acts on _all_ currently allocated qubits. We'll provide means
+ for the clients to inject global barriers equivalent to an identity operator. This will allow the clients to enforce a
+ particular layering structure (because no later operation can sink below the barrier).
+
+The global barriers will be implemented as calls to the `__quantum__trc__inject_global_barrier` function. No additional
+ support from the compiler should be needed, as the user can define their own intrinsic to represent the barrier and map
+ it to the above runtime function via the targets.qs file. The user can choose the duration of a barrier, which would
+ affect the start time of the following layers, but no operations will be added to a barrier layer, regardless of its
+ duration.
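To make the barrier and layering rules concrete, here is a minimal sketch against the tracer's C++ interface as it is
exercised by the unit tests later in this series (`CreateTracer`, `TraceSingleQubitOp`, and `InjectGlobalBarrier` are
taken from TracerTests.cpp; their exact signatures may evolve across patches):

```cpp
// Minimal sketch, not normative: drives the tracer the way TracerTests.cpp does.
#include <cassert>
#include <memory>
#include "tracer.hpp"

using namespace Microsoft::Quantum;

void LayeringWithBarrierSketch()
{
    std::shared_ptr<CTracer> tr = CreateTracer(1 /*preferred layer duration*/);

    Qubit q1 = tr->AllocateQubit();
    Qubit q2 = tr->AllocateQubit();

    LayerId first = tr->TraceSingleQubitOp(1 /*op id*/, 1 /*duration*/, q1); // creates L(0,1)
    LayerId barrier = tr->InjectGlobalBarrier(42 /*barrier id*/, 1);         // creates barrier layer L(1,1)

    // q2 has never been used, but no operation may sink below the barrier,
    // so this op opens a new layer L(2,1) instead of joining L(0,1).
    LayerId second = tr->TraceSingleQubitOp(2, 1, q2);

    assert(first == 0 && barrier == 1 && second == 2);
}
```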
+
+### The Resource Tracer's Layering Algorithm ###
+
+As the tracer is executing a sequential quantum program, it will compute a time function and corresponding layering
+ using the _conceptual_ algorithm described below (aka the "tetris algorithm"). The actual implementation of layering might
+ be done differently, as long as the resulting layering is the same as if running the conceptual algorithm.
+
+1. The tracer will have a setting for preferred layer duration: P.
+1. The first encountered operation of __non-zero__ duration N is added into layer L(0, max(P,N)). The value
+ for _conditional barrier_ is set to 0.
+1. When conditional callback is encountered, the layer L(t,N) of the measurement that produced the result the conditional
+ is on, is looked up and the _conditional barrier_ is set to _t + N_. At the end of the conditional scope the barrier
+ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen before or in the same
+ layer as the measurement, even if they don't involve the measured qubits.)
+1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is a single-qubit _op_ of
+ duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations).
+ Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the _first_ layer that contains an
+ operation that acts on at least one of the qubits _op_ is acting on. Add _op_ into this layer. If no such layer is found,
+ remember that the qubits have a pending 0-duration _op_ and commit it to the layer that first uses any of the qubits in
+ an operation of non-zero duration. At the end of the program, commit all pending operations of duration zero into a new
+ layer.
+1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is _op_ of duration _N > 0_.
+ Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the _last_ layer L(t, Nt) such that
+ Qubits(t, Nt) don't contain any of the _op_'s qubits and find the _first_ layer L(w, Nw) such that Qubits(w, Nw) contains
+ some of _op_'s qubits but Nw + N <= P. Add _op_ into whichever of these two layers has the later time. If neither such
+ layer is found, add _op_ into a new layer L(k+1, max(P, N)).
+
+## Special handling of SWAP ##
+
+The tracer will provide a way to handle SWAP as, effectively, a renaming of the involved qubits. The client will have the
+ choice of using the special handling versus treating the gate as a standard counted intrinsic.
+
+## Frame tracking ##
+
+The client might want to count operations differently when they are applied to qubits in a transformed state. For example,
+ if a Hadamard gate is applied to a qubit and then Rz and Mx gates, the client might want to count the sequence as if Rx
+ and Mz were executed. The frame is closed when the state of the qubit is reset (in the Hadamard case, when another
+ Hadamard operator is applied to the qubit). The client will be able to register the required frame tracking with the
+ tracer via a C++ registration callback.
+
+The descriptor of the frame will contain the following information and will be provided to the Tracer when initializing
+ it in C++.
+
+- openingOp: the operation id that opens the frame on the qubits this operation is applied to
+- closingOp: the operation id that closes the frame on the qubits this operation is applied to
+- vector of: { bitmask_ctls, bitmask_targets, operationIdOriginal, operationIdMapped }
+
+The closing operation will be ignored if the frame on the qubit hasn't been opened. The bitmasks define which of the qubits
+ should be in an open frame to trigger the mapping. For non-controlled operations the first mask will be ignored. To
+ begin with, the tracer will support frame mapping for up to 8 control/target qubits.
+
+__TBD__: C++ definitions of the structure above + the interface to register frame tracking with the Tracer.
+
+## Output format ##
+
+The tracer will have options to output the estimates to the command line or to a file specified by the user. In both
+ cases the output will be in the same format:
+
+- Tab separated, where:
+
+  - the first column specifies the time _t_ of a layer _L(t, n)_
+  - the second column contains an optional name of the layer that corresponds to a global barrier
+  - the remaining columns contain counts per operation in the layer
+
+- The first row is a header row: `layer_id\tname(\t[a-zA-Z]+)*`, where specific operation names are listed, such as
+  CNOT, Mz, etc., if provided by the user alongside the target.qs file (if not provided, the header row will list
+  operation ids).
+- All following rows contain statistics per layer: `[0-9]+\t[a-zA-Z]*(\t([0-9]*))*`.
+- The rows are sorted in order of increasing layer time.
+- Zero counts for any of the statistics _might_ be replaced with an empty string.
+- The global barrier layer lists the name and no statistics.
+
+A hypothetical example of this output, along with a sketch for the frame-tracking descriptor above, is shown at the end
+ of this document.
+
+__TBD__: specify the header that maps operation ids to gate names
+
+## Depth vs width optimizations ##
+
+TBD but lower priority.
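Two illustrations follow. First, as a purely hypothetical starting point for the TBD in the
[Frame tracking](#frame-tracking) section, the descriptor could be shaped roughly as follows (every type and field name
in this sketch is invented for illustration, not part of the spec):

```cpp
// Hypothetical sketch only: the spec above leaves the actual C++ definitions TBD.
#include <cstdint>
#include <vector>

using OpId = int; // matches the OpId alias used by the tracer sources

struct FrameMapping
{
    uint8_t controlsMask; // control qubits that must be in an open frame (ignored for non-controlled ops)
    uint8_t targetsMask;  // target qubits that must be in an open frame
    OpId original;        // operation id as it appears in the trace
    OpId mapped;          // operation id to count instead while the frame is open
};

struct FrameDescriptor
{
    OpId openingOp;                     // opens the frame on the qubits it is applied to
    OpId closingOp;                     // closes the frame (ignored if no frame is open)
    std::vector<FrameMapping> mappings;
};
```

The `uint8_t` masks reflect the stated initial limit of 8 control/target qubits.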
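Second, for the [Output format](#output-format) section, a hypothetical run with two operation layers, a barrier named
`mybarrier`, and user-provided operation names H, CNOT, and Mz might produce the following (columns are tab-separated in
the actual output; empty cells are omitted zero counts):

```text
layer_id    name        H    CNOT    Mz
0                       2    1
4           mybarrier
6                                    1
```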
From a7dac27e0a105a1c4d6b42b8bca2812694c737fc Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Thu, 21 Jan 2021 10:41:48 -0800 Subject: [PATCH 14/27] PR review feedback + fixed bug in handling of barriers --- src/QirRuntime/lib/Tracer/CMakeLists.txt | 1 - src/QirRuntime/lib/Tracer/tracer.cpp | 77 ++++++++----------- src/QirRuntime/lib/Tracer/tracer.hpp | 34 ++++---- src/QirRuntime/public/CoreTypes.hpp | 3 - src/QirRuntime/test/unittests/TracerTests.cpp | 47 +++++++---- 5 files changed, 82 insertions(+), 80 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt index 1426c9cc1aa..b5652cca053 100644 --- a/src/QirRuntime/lib/Tracer/CMakeLists.txt +++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt @@ -19,7 +19,6 @@ add_library(${component_name} STATIC ${source_files}) target_include_directories(${component_name} PUBLIC ${includes}) -MESSAGE(INFO "*** ${QIR_UTILITY_LIB}") target_link_libraries(${component_name} ${QIR_UTILITY_LIB} # absolute path to the bridge library, set by compile_from_ir ) diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 35b077d0302..2e72c9af99d 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -20,20 +20,12 @@ namespace Quantum } //------------------------------------------------------------------------------------------------------------------ - // CTracer's ISumulator implementation + // CTracer's ISimulator implementation //------------------------------------------------------------------------------------------------------------------ - IQuantumGateSet* CTracer::AsQuantumGateSet() - { - return nullptr; - } - IDiagnostics* CTracer::AsDiagnostics() - { - return nullptr; - } Qubit CTracer::AllocateQubit() { size_t qubit = qubits.size(); - qubits.push_back({}); + qubits.emplace_back(QubitState{}); return reinterpret_cast(qubit); } void CTracer::ReleaseQubit(Qubit /*qubit*/) @@ -45,7 +37,7 @@ namespace Quantum size_t qubitIndex = reinterpret_cast(q); const QubitState& qstate = this->UseQubit(q); - stringstream str(qubitIndex); + stringstream str(std::to_string(qubitIndex)); str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroOps.size() << ")"; return str.str(); } @@ -67,7 +59,7 @@ namespace Quantum //------------------------------------------------------------------------------------------------------------------ // CTracer::CreateNewLayer //------------------------------------------------------------------------------------------------------------------ - LayerId CTracer::CreateNewLayer(Duration opDuration) + LayerId CTracer::CreateNewLayer(Duration minRequiredDuration) { // Create a new layer for the operation. Time layerStartTime = 0; @@ -76,7 +68,8 @@ namespace Quantum const Layer& lastLayer = this->metricsByLayer.back(); layerStartTime = lastLayer.startTime + lastLayer.duration; } - this->metricsByLayer.push_back(Layer{max(this->preferredLayerDuration, opDuration), layerStartTime}); + this->metricsByLayer.emplace_back( + Layer{max(this->preferredLayerDuration, minRequiredDuration), layerStartTime}); return this->metricsByLayer.size() - 1; } @@ -89,16 +82,26 @@ namespace Quantum const QubitState& qstate = this->UseQubit(q); LayerId layerToInsertInto = INVALID; - if (qstate.layer != INVALID) + + const LayerId firstLayerAfterBarrier = + this->globalBarrier == INVALID + ? this->metricsByLayer.empty() ? INVALID : 0 + : this->globalBarrier + 1 == this->metricsByLayer.size() ? 
INVALID : this->globalBarrier + 1; + + LayerId candidate = max(qstate.layer, firstLayerAfterBarrier); + + if (candidate != INVALID) { - const Layer& lastUsedIn = this->metricsByLayer[qstate.layer]; - if (qstate.lastUsedTime + opDuration <= lastUsedIn.startTime + lastUsedIn.duration) + // Find the earliest layer that the operation fits in by duration + const Layer& candidateLayer = this->metricsByLayer[candidate]; + const Time lastUsedTime = max(qstate.lastUsedTime, candidateLayer.startTime); + if (lastUsedTime + opDuration <= candidateLayer.startTime + candidateLayer.duration) { - layerToInsertInto = qstate.layer; + layerToInsertInto = candidate; } else { - for (LayerId candidate = qstate.layer + 1; candidate < this->metricsByLayer.size(); candidate++) + for (candidate += 1; candidate < this->metricsByLayer.size(); ++candidate) { if (opDuration <= this->metricsByLayer[candidate].duration) { @@ -108,22 +111,9 @@ namespace Quantum } } } - else if (opDuration <= this->preferredLayerDuration && !this->metricsByLayer.empty()) + else if (opDuration <= this->preferredLayerDuration) { - // the qubit hasn't been used in any of the layers yet -- add it to the first layer - layerToInsertInto = 0; - } - - if (layerToInsertInto != INVALID && this->globalBarrier != INVALID) - { - if (this->globalBarrier + 1 == this->metricsByLayer.size()) - { - layerToInsertInto = INVALID; - } - else - { - layerToInsertInto = std::max(layerToInsertInto, this->globalBarrier + 1); - } + layerToInsertInto = firstLayerAfterBarrier; } return layerToInsertInto; @@ -135,12 +125,7 @@ namespace Quantum void CTracer::AddOperationToLayer(OpId id, LayerId layer) { assert(layer < this->metricsByLayer.size()); - auto inserted = this->metricsByLayer[layer].operations.insert({id, 1}); - if (!inserted.second) - { - assert(inserted.first->first == id); - inserted.first->second += 1; - } + this->metricsByLayer[layer].operations[id] += 1; } //------------------------------------------------------------------------------------------------------------------ @@ -194,9 +179,9 @@ namespace Quantum LayerId CTracer::TraceMultiQubitOp( OpId id, Duration opDuration, - int64_t nFirstGroup, + long nFirstGroup, Qubit* firstGroup, - int64_t nSecondGroup, + long nSecondGroup, Qubit* secondGroup) { assert(nFirstGroup >= 0); @@ -214,12 +199,12 @@ namespace Quantum // Figure out the layer this operation should go into. LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(secondGroup[0], opDuration); - for (int64_t i = 1; i < nSecondGroup && layerToInsertInto != INVALID; i++) + for (long i = 1; i < nSecondGroup && layerToInsertInto != INVALID; i++) { layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(secondGroup[i], opDuration)); } - for (int64_t i = 0; i < nFirstGroup && layerToInsertInto != INVALID; i++) + for (long i = 0; i < nFirstGroup && layerToInsertInto != INVALID; i++) { layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(firstGroup[i], opDuration)); } @@ -232,11 +217,11 @@ namespace Quantum this->AddOperationToLayer(id, layerToInsertInto); // Update the state of the involved qubits. 
- for (int64_t i = 0; i < nFirstGroup; i++) + for (long i = 0; i < nFirstGroup; i++) { this->UpdateQubitState(firstGroup[i], layerToInsertInto, opDuration); } - for (int64_t i = 0; i < nSecondGroup; i++) + for (long i = 0; i < nSecondGroup; i++) { this->UpdateQubitState(secondGroup[i], layerToInsertInto, opDuration); } @@ -258,7 +243,7 @@ namespace Quantum return reinterpret_cast(layerId); } - Result CTracer::TraceMultiQubitMeasurement(OpId id, Duration duration, int64_t nTargets, Qubit* targets) + Result CTracer::TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets) { LayerId layerId = this->TraceMultiQubitOp(id, duration, 0, nullptr, nTargets, targets); return reinterpret_cast(layerId); diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp index 0271ad57c42..c62cc4e05b8 100644 --- a/src/QirRuntime/lib/Tracer/tracer.hpp +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -13,9 +13,9 @@ namespace Microsoft { namespace Quantum { - using OpId = int32_t; - using Time = int32_t; - using Duration = int32_t; + using OpId = int; + using Time = int; + using Duration = int; using LayerId = size_t; constexpr LayerId INVALID = std::numeric_limits::max(); @@ -32,7 +32,7 @@ namespace Quantum const Time startTime; // Quantum operations, assigned to this layer. - std::unordered_map operations; + std::unordered_map operations; // Optional id, if the layer represents a global barrier. OpId barrierId = -1; @@ -96,7 +96,7 @@ namespace Quantum LayerId FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const; // Returns the index of the created layer. - LayerId CreateNewLayer(Duration opDuration); + LayerId CreateNewLayer(Duration minRequiredDuration); // Adds operation with given id into the given layer. Assumes that duration contraints have been satisfied. 
void AddOperationToLayer(OpId id, LayerId layer); @@ -108,28 +108,34 @@ namespace Quantum // ------------------------------------------------------------------------------------------------------------- // ISimulator interface // ------------------------------------------------------------------------------------------------------------- - IQuantumGateSet* AsQuantumGateSet() override; - IDiagnostics* AsDiagnostics() override; Qubit AllocateQubit() override; void ReleaseQubit(Qubit qubit) override; std::string QubitToString(Qubit qubit) override; void ReleaseResult(Result result) override; + IQuantumGateSet* AsQuantumGateSet() override + { + throw std::logic_error("Not supported: all intrinsics must be converted to tracing operations"); + } + IDiagnostics* AsDiagnostics() override + { + return nullptr; + } Result M(Qubit target) override { - throw std::logic_error("not_implemented"); + throw std::logic_error("Not supported: all measurements must be converted to tracing operations"); } Result Measure(long numBases, PauliId bases[], long numTargets, Qubit targets[]) override { - throw std::logic_error("not_implemented"); + throw std::logic_error("Not supported: all measurements must be converted to tracing operations"); } bool AreEqualResults(Result r1, Result r2) override { - throw std::logic_error("Cannot compare results, when tracing!"); + throw std::logic_error("Cannot compare results while tracing!"); } ResultValue GetResultValue(Result result) override { - throw std::logic_error("Result values aren't available, when tracing!"); + throw std::logic_error("Result values aren't available while tracing!"); } Result UseZero() override; Result UseOne() override; @@ -146,13 +152,13 @@ namespace Quantum LayerId TraceMultiQubitOp( OpId id, Duration duration, - int64_t nFirstGroup, + long nFirstGroup, Qubit* firstGroup, - int64_t nSecondGroup, + long nSecondGroup, Qubit* secondGroup); Result TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target); - Result TraceMultiQubitMeasurement(OpId id, Duration duration, int64_t nTargets, Qubit* targets); + Result TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets); LayerId GetLayerIdOfSourceMeasurement(Result r) const { return reinterpret_cast(r); diff --git a/src/QirRuntime/public/CoreTypes.hpp b/src/QirRuntime/public/CoreTypes.hpp index 6dd527f7f85..6fce56e5020 100644 --- a/src/QirRuntime/public/CoreTypes.hpp +++ b/src/QirRuntime/public/CoreTypes.hpp @@ -3,7 +3,6 @@ // The core types will be exposed in the C-interfaces for interop, thus no // namespaces or scoped enums can be used to define them. 
- /*============================================================================== Qubit & Result @@ -34,5 +33,3 @@ enum PauliId PauliId_Z = 2, PauliId_Y = 3, }; - - diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index fa77aee15a0..3ec12382cee 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -37,7 +37,8 @@ TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[t CHECK(layers[2].startTime == 6); CHECK(layers[2].operations.size() == 2); CHECK(layers[3].startTime == 10); - CHECK(layers[3].operations.size() == 1);} + CHECK(layers[3].operations.size() == 1); +} TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]") { @@ -48,12 +49,12 @@ TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]") Qubit q2 = tr->AllocateQubit(); Qubit q3 = tr->AllocateQubit(); - CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created - CHECK(0 == tr->TraceSingleQubitOp(2, 0, q1)); // add the op into L(0,3) + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created + CHECK(0 == tr->TraceSingleQubitOp(2, 0, q1)); // add the op into L(0,3) CHECK(INVALID == tr->TraceSingleQubitOp(3, 0, q3)); // pending zero op (will remain orphan) CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q2)); // pending zero op CHECK(INVALID == tr->TraceSingleQubitOp(5, 0, q2)); // another pending zero op - CHECK(0 == tr->TraceSingleQubitOp(6, 1, q2)); // add the op into L(0,3) together with the pending ones + CHECK(0 == tr->TraceSingleQubitOp(6, 1, q2)); // add the op into L(0,3) together with the pending ones const vector& layers = tr->UseLayers(); REQUIRE(layers.size() == 1); @@ -146,39 +147,53 @@ TEST_CASE("Operations with same id are counted together", "[tracer]") TEST_CASE("Global barrier", "[tracer]") { shared_ptr tr = CreateTracer(); - tr->SetPreferredLayerDuration(1); + tr->SetPreferredLayerDuration(2); Qubit q1 = tr->AllocateQubit(); Qubit q2 = tr->AllocateQubit(); Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 4, q1)); // L(0,4) created + CHECK(0 == tr->TraceSingleQubitOp(2, 1, q4)); // added to L(0,4) + CHECK(1 == tr->InjectGlobalBarrier(42, 1)); // creates L(4,2) - CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,1) created - CHECK(1 == tr->InjectGlobalBarrier(42, 1)); // creates L(1,1) + CHECK(2 == tr->TraceMultiQubitOp(3, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); + // the barrier shouldn't allow this op to fall through into L(0,4), so should create L(6,2) - CHECK(2 == tr->TraceMultiQubitOp(2, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); - // the barrier shouldn't allow this op to fall through into L(0,1), so should create L(2,1) + CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q1)); + // the barrier shouldn't allow this op to fall through into L(0,4), so should create pending op - CHECK(INVALID == tr->TraceSingleQubitOp(3, 0, q1)); - // the barrier shouldn't allow this op to fall through into L(0,1), so should create pending op + CHECK(2 == tr->TraceSingleQubitOp(5, 1, q1)); + // should be added into L(6,2) together with the pending op `3` - CHECK(2 == tr->TraceSingleQubitOp(4, 1, q1)); - // should be added into L(2,1) together with the pending op `3` + CHECK(3 == tr->TraceSingleQubitOp(6, 3, q2)); + // long op, with no existing wide layers to host it, so should create L(8,3) + + CHECK(3 == 
tr->TraceSingleQubitOp(7, 3, q4)); + // long op but can be added into L(8,3), which is post the barrier const vector& layers = tr->UseLayers(); - REQUIRE(layers.size() == 3); - CHECK(layers[0].operations.size() == 1); + REQUIRE(layers.size() == 4); + CHECK(layers[0].operations.size() == 2); CHECK(layers[1].operations.size() == 0); CHECK(layers[2].operations.size() == 3); + CHECK(layers[3].operations.size() == 2); const auto& ops0 = layers[0].operations; CHECK(ops0.find(1) != ops0.end()); + CHECK(ops0.find(2) != ops0.end()); CHECK(42 == layers[1].barrierId); const auto& ops2 = layers[2].operations; - CHECK(ops2.find(2) != ops2.end()); CHECK(ops2.find(3) != ops2.end()); CHECK(ops2.find(4) != ops2.end()); + CHECK(ops2.find(5) != ops2.end()); + + const auto& ops3 = layers[3].operations; + CHECK(ops3.find(6) != ops3.end()); + CHECK(ops3.find(7) != ops3.end()); } // For layering purposes, measurements behave pretty much the same as other operations From db246af25300ef96b8d0079a5907ff57ad992605 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Thu, 4 Feb 2021 16:00:27 -0800 Subject: [PATCH 15/27] Minor updates in spec wording --- src/QirRuntime/lib/Tracer/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 16bdc4c31cf..80d0baf3926 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -6,7 +6,7 @@ The purpose of the Resource Tracer is to provide efficient and flexible way to e In addition to the standard QIR runtime functions, the quantum program will have to: -1. convert _all_ used intrinsic operations into one of the supported by the tracer _trc_ operations (see the list below); +1. convert _all_ used intrinsic operations into one of the _trc_ operations supported by the tracer (see the list below); 1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates would cover only one branch of the execution); 1. (_optional_) provide a C++ header file with names of the gates (for user friendly output); @@ -37,17 +37,17 @@ ___WIP___ | `void __quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits.| | `void __quantum__trc__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit. | | `void __quantum__trc__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. | -| `%Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a sigle qubit. The user might assign different operation id, depending on the basis of the measurement. | -| `%Result* @__quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user might assign different operation id, depending on the basis of the measurement for each qubit.| +| `%Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user might assign different operation ids for different measurement bases. | +| `%Result* @__quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. 
The user might assign different operation ids for different measurement bases. | | `void __quantum__trc__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. | -_Note on operation ids_: The client is responsible for using opeartion ids in a consistent manner. Operations with the +_Note on operation ids_: The client is responsible for using operation ids in a consistent manner. Operations with the same id will be counted by the tracer as the _same_ operation, even accross invocations with different number of target qubits or when different functors are applied. ## Native backing of the extension methods ## -The Resource Tracer will reuse qir-rt library as much as possible while extending it with the entry points specified above. +The Resource Tracer will reuse qir-rt library as much as possible while extending it with the callbacks specified above. __Conditionals on measurements__: The Resource Tracer will execute LLVM IR's branching structures "as is", depending on the values of the corresponding variables at runtime. To enable estimation of branches that depend on a measurement @@ -66,7 +66,7 @@ __Caching__ (lower priority): It might be a huge perf win if the Resource Tracer _Definition_: ___Time___ is an integer-valued function on all quantum operations in a program (gates, measurements, qubits allocation/release). For each gate there is start and end times. For each qubit, there are times when the qubit is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits the gate uses. - If two gates or measurements use the same qubit, one of the gates must have start time greater of equal than the end + If two gates or measurements use the same qubit, one of the gates must have start time greater than or equal to the end time of the other. A sequentially executed quantum program can be assigned a trivial time function, when all quantum operations have @@ -104,9 +104,9 @@ As the tracer is executing a sequential quantum program, it will compute a time 1. The first encountered operation of __non-zero__ duration N is added into layer L(0, max(P,N)). The value for _conditional barrier_ is set to 0. 1. When conditional callback is encountered, the layer L(t,N) of the measurement that produced the result the conditional - is on, is looked up and the _conditional barrier_ is set to _t + N_. At the end of the conditional scope the barrier - is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen before or in the same - layer as the measurement, even if they don't involve the measured qubits.) + is dependent on, is looked up and the _conditional barrier_ is set to _t + N_. At the end of the conditional scope the + barrier is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen before or in + the same layer as the measurement, even if they don't involve the measured qubits.) 1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is a single-qubit _op_ of duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations). 
Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the _first_ layer that contains an From 4cf321d660f2bbce2d9aeab4815ebccce785a5f7 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Fri, 5 Feb 2021 14:10:07 -0800 Subject: [PATCH 16/27] Enable tracer to output metrics (string or file) --- src/QirRuntime/CMakeLists.txt | 3 +- src/QirRuntime/lib/Tracer/CMakeLists.txt | 16 +- src/QirRuntime/lib/Tracer/README.md | 7 +- src/QirRuntime/lib/Tracer/tracer.cpp | 59 +- src/QirRuntime/lib/Tracer/tracer.hpp | 34 +- src/QirRuntime/test/QIR-tracer/CMakeLists.txt | 19 +- src/QirRuntime/test/QIR-tracer/generate.py | 15 +- .../test/QIR-tracer/qir-tracer-driver.cpp | 32 +- .../test/QIR-tracer/tracer-config.cpp | 29 + .../test/QIR-tracer/tracer-config.hpp | 29 + .../{tracer-intrinsics.ll => tracer-qir.ll} | 1150 +++++++++-------- src/QirRuntime/test/unittests/TracerTests.cpp | 105 +- 12 files changed, 875 insertions(+), 623 deletions(-) create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-config.cpp create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-config.hpp rename src/QirRuntime/test/QIR-tracer/{tracer-intrinsics.ll => tracer-qir.ll} (62%) diff --git a/src/QirRuntime/CMakeLists.txt b/src/QirRuntime/CMakeLists.txt index 28ea5e8c349..d109665c0db 100644 --- a/src/QirRuntime/CMakeLists.txt +++ b/src/QirRuntime/CMakeLists.txt @@ -114,12 +114,13 @@ endmacro(compile_from_qir) if (WIN32) set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-rt-u.lib") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-qis-u.lib") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/bridge-trc-u.lib") else() set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-rt-u.a") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-qis-u.a") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/libbridge-trc-u.a") endif() - add_subdirectory(lib) add_subdirectory(test) diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt index b5652cca053..d79db1802ec 100644 --- a/src/QirRuntime/lib/Tracer/CMakeLists.txt +++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt @@ -1,10 +1,9 @@ -set(component_name "tracer") -set(bridge_lib "bridge_trc_u") +# build the utility lib for tracer's bridge +compile_from_qir(bridge-trc bridge_trc) -compile_from_qir(bridge-trc ${bridge_lib}) +# build the native part of the tracer +set(component_name "tracer") -# the tracer implements its own management of qubits/results but reuses the rest of the qir-runtime -# TODO: consider splitting qir-rt into two libs set(source_files "tracer-trc.cpp" "tracer.cpp" @@ -16,11 +15,6 @@ set(includes ) add_library(${component_name} STATIC ${source_files}) - target_include_directories(${component_name} PUBLIC ${includes}) -target_link_libraries(${component_name} - ${QIR_UTILITY_LIB} # absolute path to the bridge library, set by compile_from_ir -) - -add_dependencies(${component_name} ${bridge_lib}) +add_dependencies(${component_name} bridge_trc) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 80d0baf3926..50847e74174 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -9,11 +9,14 @@ In addition to the standard QIR runtime functions, the quantum program will have 1. convert _all_ used intrinsic operations into one of the _trc_ operations supported by the tracer (see the list below); 1. 
(_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates
   would cover only one branch of the execution);
-1. (_optional_) provide a C++ header file with names of the gates (for user friendly output);
1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated
   as inlined as if the whole program consisted of a single operation);
1. (_optional_) provide callbacks for global barriers;
-1. (_optional_) provide description of mapping for frame tracking.
+1. (_optional_) provide a description of the mapping for frame tracking;
+1. (_optional_) provide names of operations for user-friendly output (in the form of `tracer-config.hpp|cpp` files that,
+   ideally, can be generated by the Q# compiler from the mappings described in target.qs).
+
+The last provision allows the tracer to report its metrics against user-friendly operation names instead of raw
+numeric ids.

The Resource Tracer will consist of:

diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp
index 2e72c9af99d..9ee0e340391 100644
--- a/src/QirRuntime/lib/Tracer/tracer.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer.cpp
@@ -1,7 +1,8 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

-#include
+#include
+#include
#include

#include "tracer.hpp"
@@ -13,9 +14,14 @@ namespace Microsoft
namespace Quantum
{
    thread_local std::shared_ptr<CTracer> tracer = nullptr;
-    std::shared_ptr<CTracer> CreateTracer()
+    std::shared_ptr<CTracer> CreateTracer(int preferredLayerDuration)
    {
-        tracer = std::make_shared<CTracer>();
+        tracer = std::make_shared<CTracer>(preferredLayerDuration);
+        return tracer;
+    }
+    std::shared_ptr<CTracer> CreateTracer(int preferredLayerDuration, const std::unordered_map<OpId, std::string>& opNames)
+    {
+        tracer = std::make_shared<CTracer>(preferredLayerDuration, opNames);
        return tracer;
    }

@@ -151,6 +157,8 @@ namespace Quantum
    //------------------------------------------------------------------------------------------------------------------
    LayerId CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target)
    {
+        this->seenOps.insert(id);
+
        QubitState& qstate = this->UseQubit(target);

        if (opDuration == 0 &&
            (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier)))
@@ -187,6 +195,8 @@
        assert(nFirstGroup >= 0);
        assert(nSecondGroup > 0);

+        this->seenOps.insert(id);
+
        // Operations that involve a single qubit can special case duration zero.
        if (nFirstGroup == 0 && nSecondGroup == 1)
        {
@@ -248,5 +258,48 @@
        LayerId layerId = this->TraceMultiQubitOp(id, duration, 0, nullptr, nTargets, targets);
        return reinterpret_cast<Result>(layerId);
    }
+
+    //------------------------------------------------------------------------------------------------------------------
+    // CTracer::PrintLayerMetrics
+    //------------------------------------------------------------------------------------------------------------------
+    static std::string GetOperationName(OpId opId, const std::unordered_map<OpId, std::string>& opNames)
+    {
+        if (opId < 0)
+        {
+            return "";
+        }
+
+        auto nameIt = opNames.find(opId);
+        return nameIt == opNames.end() ? std::to_string(opId) : nameIt->second;
+    }
+    void CTracer::PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const
+    {
+        // Sort the operations by id so the output is deterministic.
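+        // Illustrative example of the output shape: if only ops {0: "X", 1: "CX"} were seen and "\t" is the
+        // separator, the table comes out as
+        //     layer_id  name  X  CX
+        //     0               2  1
+        // where the first column holds the layer's start time, the second the name of the layer's barrier,
+        // if any, and the remaining columns the per-layer counts of each seen operation.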
+        std::set<OpId> seenOpsOrderedById(this->seenOps.begin(), this->seenOps.end());
+
+        // header row
+        out << "layer_id" << separator << "name";
+        for (OpId opId : seenOpsOrderedById)
+        {
+            out << separator << GetOperationName(opId, this->opNames);
+        }
+        out << std::endl;
+
+        // data rows
+        const std::string zeroString = printZeroMetrics ? "0" : "";
+        for (const Layer& layer : this->metricsByLayer)
+        {
+            out << layer.startTime;
+            out << separator << GetOperationName(layer.barrierId, this->opNames);
+
+            for (OpId opId : seenOpsOrderedById)
+            {
+                auto foundInLayer = layer.operations.find(opId);
+                out << separator
+                    << ((foundInLayer == layer.operations.end()) ? zeroString : std::to_string(foundInLayer->second));
+            }
+            out << std::endl;
+        }
+    }
} // namespace Quantum
} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index c62cc4e05b8..c060229a9f1 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -3,7 +3,9 @@

#include
#include
+#include
#include
+#include
#include

#include "CoreTypes.hpp"
@@ -69,7 +71,7 @@ namespace Quantum
        std::vector<QubitState> qubits;

        // The preferred duration of a layer.
-        int preferredLayerDuration = 0;
+        const int preferredLayerDuration = 0;

        // The index into the vector is treated as the implicit id of the layer.
        std::vector<Layer> metricsByLayer;

        // layer that preceded it, even if the new operations involve completely new qubits.
        LayerId globalBarrier = INVALID;

+        // Mapping of operation ids to user-chosen names; for operations the user didn't name, the output will
+        // use the numeric operation ids.
+        std::unordered_map<OpId, std::string> opNames;
+
+        // Operations we've seen so far (so that the output can be trimmed to include only these).
+        std::unordered_set<OpId> seenOps;
+
    private:
        QubitState& UseQubit(Qubit q)
        {
@@ -105,6 +114,17 @@ namespace Quantum
        void UpdateQubitState(Qubit q, LayerId layer, Duration opDuration);

    public:
+        explicit CTracer(int preferredLayerDuration)
+            : preferredLayerDuration(preferredLayerDuration)
+        {
+        }
+
+        CTracer(int preferredLayerDuration, const std::unordered_map<OpId, std::string>& opNames)
+            : preferredLayerDuration(preferredLayerDuration)
+            , opNames(opNames)
+        {
+        }
+
        // -------------------------------------------------------------------------------------------------------------
        // ISimulator interface
        // -------------------------------------------------------------------------------------------------------------
@@ -172,20 +192,20 @@ namespace Quantum
        // -------------------------------------------------------------------------------------------------------------
        // Configuring the tracer and getting data back from it.
// ------------------------------------------------------------------------------------------------------------- - void SetPreferredLayerDuration(int dur) - { - this->preferredLayerDuration = dur; - } - // Temporary method for initial testing // TODO: replace with a safer accessor const std::vector& UseLayers() { return this->metricsByLayer; } + + void PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const; }; - std::shared_ptr CreateTracer(); + std::shared_ptr CreateTracer(int preferredLayerDuration); + std::shared_ptr CreateTracer( + int preferredLayerDuration, + const std::unordered_map& opNames); } // namespace Quantum } // namespace Microsoft \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt index 092ab2d250d..e48b4c619f7 100644 --- a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt +++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt @@ -1,23 +1,18 @@ -set(TEST_FILES - tracer-intrinsics -) - -foreach(file ${TEST_FILES}) - compile_from_qir(${file} "") # don't create a target per file - list(APPEND QIR_TESTS_LIBS ${QIR_UTILITY_LIB}) -endforeach() -add_custom_target(qir_tracer_test_lib DEPENDS ${QIR_TESTS_LIBS}) +compile_from_qir(tracer-qir tracer_qir) #============================================================================== # The executable target for QIR tests triggers the custom actions to compile ll files # add_executable(qir-tracer-tests - qir-tracer-driver.cpp) + qir-tracer-driver.cpp + tracer-config.cpp +) target_link_libraries(qir-tracer-tests PUBLIC - ${QIR_TESTS_LIBS} + ${QIR_UTILITY_LIB} # set by compile_from_qir ${QIR_BRIDGE_UTILITY_LIB} + ${QIR_BRIDGE_TRACER_UTILITY_LIB} tracer qir-rt-support ) @@ -27,7 +22,7 @@ target_include_directories(qir-tracer-tests PUBLIC "${public_includes}" "${PROJECT_SOURCE_DIR}/lib/Tracer" ) -add_dependencies(qir-tracer-tests qir_tracer_test_lib) +add_dependencies(qir-tracer-tests tracer_qir) install(TARGETS qir-tracer-tests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") add_unit_test(qir-tracer-tests) diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py index e1e5828a44f..bf5a716b017 100644 --- a/src/QirRuntime/test/QIR-tracer/generate.py +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -23,10 +23,15 @@ def log(message): # parameters qsc = sys.argv[1] # argv[0] is the name of this script file +# find all qs files in this folder +files_to_process = "" +output_file = "tracer-qir" for file in os.listdir(root_dir): (file_name, ext) = os.path.splitext(file) - if ext == ".qs" and file_name != "tracer-core" and file_name != "tracer-target": - log("Generating QIR from " + file) - subprocess.run( - qsc + " build --qir s --build-exe --input " + file + - " tracer-core.qs tracer-target.qs --proj " + file_name, shell = True) + if ext == ".qs": + files_to_process = files_to_process + " " + file + +command = (qsc + " build --qir s --build-exe --input " + files_to_process + " --proj " + output_file) +log("Executing: " + command) +subprocess.run(command, shell = True) + diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index ae12e0aef4b..4a0f358d23a 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -2,33 +2,37 @@ // Licensed under the MIT License. 
#include
+#include

#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
#include "catch.hpp"

+#include "context.hpp"
+#include "tracer-config.hpp"
#include "tracer.hpp"

using namespace std;
using namespace Microsoft::Quantum;

-namespace Microsoft
+namespace TracerUser
{
-namespace Quantum
-{
-    std::unique_ptr<ISimulator> CreateFullstateSimulator()
-    {
-        throw std::logic_error("Tracer should not instantiate full state simulator");
-    }
-} // namespace Quantum
-} // namespace Microsoft
-
-extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT
+
TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]")
{
-    shared_ptr<CTracer> tr = CreateTracer();
-    SetSimulatorForQIR(tr.get());
+    shared_ptr<CTracer> tr = CreateTracer(1 /*layer duration*/, g_operationNames);
+    QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/);
    REQUIRE(Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body());

    vector<Layer> layers = tr->UseLayers();
-    CHECK(layers.size() > 0);
+
+    // AllIntrinsics happens to produce 25 layers right now, and we are not checking here whether that is expected --
+    // testing of the layering logic is better done by the unit tests.
+    CHECK(layers.size() == 25);
+
+    std::ofstream out;
+    out.open("qir-tracer-test.txt");
+    tr->PrintLayerMetrics(out, "\t", false /*printZeroMetrics*/);
+    out.close();
}
+
+}
\ No newline at end of file
diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp
new file mode 100644
index 00000000000..b337fa903ec
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp
@@ -0,0 +1,29 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// TODO: ideally, this file should be generated by the Q# compiler alongside the qir, using the mappings specified in
+// target.qs.
+
+#include
+
+#include "QuantumApi_I.hpp"
+#include "tracer-config.hpp"
+
+namespace TracerUser
+{
+const std::unordered_map<OpId, std::string> g_operationNames = {{0, "X"}, {1, "CX"}, {2, "MCX"},
+                                                                {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/};
+}
+
+namespace Microsoft
+{
+namespace Quantum
+{
+
+    std::unique_ptr<ISimulator> CreateFullstateSimulator()
+    {
+        throw std::logic_error("Tracer should not instantiate full state simulator");
+    }
+
+} // namespace Quantum
+} // namespace Microsoft
\ No newline at end of file
diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp
new file mode 100644
index 00000000000..66b9a747ae3
--- /dev/null
+++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp
@@ -0,0 +1,29 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
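+
+// Sketch of how these definitions are meant to be consumed (mirrors qir-tracer-driver.cpp in this change;
+// writing to std::cout here is illustrative -- the driver writes to a file instead):
+//
+//   std::shared_ptr<CTracer> tr = CreateTracer(1 /*layer duration*/, TracerUser::g_operationNames);
+//   QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/);
+//   Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body();
+//   tr->PrintLayerMetrics(std::cout, "\t", false /*printZeroMetrics*/);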
+ +// TODO: ideally, this file should be generated by the Q# compiler alongside the qir + +#pragma once + +#include +#include + +namespace Microsoft +{ +namespace Quantum +{ + using OpId = int; + using Time = int; + using Duration = int; + using LayerId = size_t; +} +} + +namespace TracerUser +{ +using OpId = int; +extern const std::unordered_map g_operationNames; +} // namespace TracerUser + +// Available function in generated QIR +extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll similarity index 62% rename from src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll rename to src/QirRuntime/test/QIR-tracer/tracer-qir.ll index 94daa07ed27..f14da190f2a 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.ll +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -14,39 +14,159 @@ @Microsoft__Quantum__Testing__Tracer__AllIntrinsics = alias i1 (), i1 ()* @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body -define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb) ret void } declare void @__quantum__trc__single_qubit_op(i64, i64, %Qubit*) -define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -declare void @__quantum__rt__array_add_access(%Array*) +declare void @__quantum__rt__array_update_alias_count(%Array*, i64) declare void @__quantum__trc__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) -declare void @__quantum__rt__array_remove_access(%Array*) +define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -define void 
@Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +declare %Result* @__quantum__trc__single_qubit_measure(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +entry: + call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__1, i64 -1) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, 
i64) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + +define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__1, i64 -1) ret void } @@ -64,31 +184,32 @@ entry: define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { entry: - call void @__quantum__rt__array_add_access(%Array* %paulis) - call void @__quantum__rt__array_add_access(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) %0 = load %Result*, %Result** @ResultOne %res = alloca %Result* store %Result* %0, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) %haveY = alloca i1 store i1 false, i1* %haveY %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) @@ -96,25 +217,23 @@ entry: br label %header__1 header__1: ; preds = %exiting__1, %entry - %i = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] - %3 = icmp sge i64 %i, %2 - %4 = icmp sle i64 %i, %2 - %5 = select i1 true, i1 %4, i1 %3 - br i1 %5, label %body__1, label 
%exit__1 + %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %3 = icmp sle i64 %i, %2 + br i1 %3, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %7 = bitcast i8* %6 to i2* - %8 = load i2, i2* %7 - %9 = load i2, i2* @PauliY - %10 = icmp eq i2 %8, %9 - %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) - %12 = bitcast i8* %11 to i2* - %13 = load i2, i2* %12 - %14 = load i2, i2* @PauliI - %15 = icmp eq i2 %13, %14 - %16 = or i1 %10, %15 - br i1 %16, label %then0__1, label %continue__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %5 = bitcast i8* %4 to i2* + %6 = load i2, i2* %5 + %7 = load i2, i2* @PauliY + %8 = icmp eq i2 %6, %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %10 = bitcast i8* %9 to i2* + %11 = load i2, i2* %10 + %12 = load i2, i2* @PauliI + %13 = icmp eq i2 %11, %12 + %14 = or i1 %8, %13 + br i1 %14, label %then0__1, label %continue__1 then0__1: ; preds = %body__1 store i1 true, i1* %haveY @@ -124,48 +243,56 @@ continue__1: ; preds = %then0__1, %body__1 br label %exiting__1 exiting__1: ; preds = %continue__1 - %17 = add i64 %i, 1 + %15 = add i64 %i, 1 br label %header__1 exit__1: ; preds = %header__1 - %18 = load i1, i1* %haveY - br i1 %18, label %then0__2, label %test1__1 + %16 = load i1, i1* %haveY + br i1 %16, label %then0__2, label %test1__1 then0__2: ; preds = %exit__1 - %19 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qubits) - store %Result* %19, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %19) + %17 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 1) + store %Result* %17, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 -1) br label %continue__2 test1__1: ; preds = %exit__1 - %20 = icmp sgt i64 %1, 2 - br i1 %20, label %then1__1, label %test2__1 + %18 = icmp sgt i64 %1, 2 + br i1 %18, label %then1__1, label %test2__1 then1__1: ; preds = %test1__1 - %21 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qubits) - store %Result* %21, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %21) + %19 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 1) + %20 = load %Result*, %Result** %res + store %Result* %19, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %20, i64 -1) br label %continue__2 test2__1: ; preds = %test1__1 - %22 = icmp eq i64 %1, 1 - br i1 %22, label %then2__1, label %test3__1 + %21 = icmp eq i64 %1, 1 + br i1 %21, label %then2__1, label %test3__1 then2__1: ; preds = %test2__1 - %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %24 = bitcast i8* %23 to i2* - %25 = load i2, i2* %24 - %26 = load i2, i2* @PauliX - %27 = icmp eq i2 %25, %26 - br i1 %27, label %then0__3, label %else__1 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = load i2, i2* %23 + %25 = load i2, i2* @PauliX + %26 = icmp eq i2 %24, 
%25 + br i1 %26, label %then0__3, label %else__1 then0__3: ; preds = %then2__1 - %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) - %29 = bitcast i8* %28 to %Qubit** - %qb__inline__1 = load %Qubit*, %Qubit** %29 - %30 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__1) - store %Result* %30, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %30) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %28 = bitcast i8* %27 to %Qubit** + %qb__inline__1 = load %Qubit*, %Qubit** %28 + %29 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__1) + call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 1) + %30 = load %Result*, %Result** %res + store %Result* %29, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %30, i64 -1) br label %continue__3 else__1: ; preds = %then2__1 @@ -173,436 +300,348 @@ else__1: ; preds = %then2__1 %32 = bitcast i8* %31 to %Qubit** %qb__inline__2 = load %Qubit*, %Qubit** %32 %33 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__2) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + %34 = load %Result*, %Result** %res store %Result* %33, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %33) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) br label %continue__3 continue__3: ; preds = %else__1, %then0__3 br label %continue__2 test3__1: ; preds = %test2__1 - %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %35 = bitcast i8* %34 to i2* - %36 = load i2, i2* %35 - %37 = load i2, i2* @PauliX - %38 = icmp eq i2 %36, %37 - %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %40 = bitcast i8* %39 to i2* - %41 = load i2, i2* %40 - %42 = load i2, i2* @PauliX - %43 = icmp eq i2 %41, %42 - %44 = and i1 %38, %43 - br i1 %44, label %then3__1, label %test4__1 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = load i2, i2* %36 + %38 = load i2, i2* @PauliX + %39 = icmp eq i2 %37, %38 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %41 = bitcast i8* %40 to i2* + %42 = load i2, i2* %41 + %43 = load i2, i2* @PauliX + %44 = icmp eq i2 %42, %43 + %45 = and i1 %39, %44 + br i1 %45, label %then3__1, label %test4__1 then3__1: ; preds = %test3__1 - %45 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qubits) - store %Result* %45, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %45) + %46 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 1) + %47 = load %Result*, %Result** %res + store %Result* %46, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) br label %continue__2 test4__1: ; preds = %test3__1 - %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %47 = bitcast i8* %46 to i2* - %48 = load i2, i2* %47 - %49 = load i2, i2* @PauliX 
- %50 = icmp eq i2 %48, %49 - %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %52 = bitcast i8* %51 to i2* - %53 = load i2, i2* %52 - %54 = load i2, i2* @PauliZ - %55 = icmp eq i2 %53, %54 - %56 = and i1 %50, %55 - br i1 %56, label %then4__1, label %test5__1 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = load i2, i2* %49 + %51 = load i2, i2* @PauliX + %52 = icmp eq i2 %50, %51 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %54 = bitcast i8* %53 to i2* + %55 = load i2, i2* %54 + %56 = load i2, i2* @PauliZ + %57 = icmp eq i2 %55, %56 + %58 = and i1 %52, %57 + br i1 %58, label %then4__1, label %test5__1 then4__1: ; preds = %test4__1 - %57 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qubits) - store %Result* %57, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %57) + %59 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 1) + %60 = load %Result*, %Result** %res + store %Result* %59, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) br label %continue__2 test5__1: ; preds = %test4__1 - %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %59 = bitcast i8* %58 to i2* - %60 = load i2, i2* %59 - %61 = load i2, i2* @PauliZ - %62 = icmp eq i2 %60, %61 - %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %64 = bitcast i8* %63 to i2* - %65 = load i2, i2* %64 - %66 = load i2, i2* @PauliX - %67 = icmp eq i2 %65, %66 - %68 = and i1 %62, %67 - br i1 %68, label %then5__1, label %test6__1 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = load i2, i2* %62 + %64 = load i2, i2* @PauliZ + %65 = icmp eq i2 %63, %64 + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %67 = bitcast i8* %66 to i2* + %68 = load i2, i2* %67 + %69 = load i2, i2* @PauliX + %70 = icmp eq i2 %68, %69 + %71 = and i1 %65, %70 + br i1 %71, label %then5__1, label %test6__1 then5__1: ; preds = %test5__1 - %69 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qubits) - store %Result* %69, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %69) + %72 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 1) + %73 = load %Result*, %Result** %res + store %Result* %72, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) br label %continue__2 test6__1: ; preds = %test5__1 - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) - %71 = bitcast i8* %70 to i2* - %72 = load i2, i2* %71 - %73 = load i2, i2* @PauliZ - %74 = icmp eq i2 %72, %73 - %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) - %76 = bitcast i8* %75 to i2* - %77 = load i2, i2* %76 - %78 = load i2, i2* @PauliZ - %79 = icmp eq i2 %77, %78 - %80 = and i1 %74, %79 - br i1 %80, label %then6__1, label %continue__2 + %74 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %75 = bitcast i8* %74 to i2* + %76 = load i2, i2* %75 + %77 = load i2, i2* @PauliZ + %78 = icmp eq i2 %76, %77 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %80 = bitcast i8* %79 to i2* + %81 = load i2, i2* %80 + %82 = load i2, i2* @PauliZ + %83 = icmp eq i2 %81, %82 + %84 = and i1 %78, %83 + br i1 %84, label %then6__1, label %continue__2 then6__1: ; preds = %test6__1 - %81 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qubits) - store %Result* %81, %Result** %res - call void @__quantum__rt__result_unreference(%Result* %81) + %85 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 1) + %86 = load %Result*, %Result** %res + store %Result* %85, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) br label %continue__2 continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 - %82 = load %Result*, %Result** %res - call void @__quantum__rt__result_reference(%Result* %82) - call void @__quantum__rt__array_remove_access(%Array* %paulis) - call void @__quantum__rt__array_remove_access(%Array* %qubits) - ret %Result* %82 + %87 = load %Result*, %Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %87 } -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) declare %Result* @__quantum__trc__multi_qubit_measure(i64, i64, %Array*) -declare void @__quantum__rt__result_unreference(%Result*) - -declare %Result* @__quantum__trc__single_qubit_measure(i64, i64, %Qubit*) - -declare void @__quantum__rt__result_reference(%Result*) - -define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) - 
call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) %1 = icmp eq i64 %0, 1 br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) %1 = icmp eq i64 %0, 1 br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { +define void 
@Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void 
@__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { +define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { entry: - %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) ret %Result* %0 } -define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { entry: - %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) - %1 = bitcast i8* %0 to %Qubit** - store %Qubit* %control, %Qubit** %1 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__1) - br i1 true, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__1) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__1) - ret void + %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 } -declare %Array* @__quantum__rt__array_create_1d(i32, i64) - -declare void @__quantum__rt__array_unreference(%Array*) - -define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { entry: - %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) - %1 = bitcast i8* %0 to %Qubit** - store %Qubit* %control, %Qubit** %1 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__1) - br i1 true, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__1) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__1) + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void 
@__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) - ret void -} + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - ret void -} +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__rt__array_add_access(%Array* %ctls) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) - call void @__quantum__rt__array_remove_access(%Array* %ctls) +else__1: ; preds = %entry + call void 
@__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) - ret %Result* %0 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } @@ -620,83 
+659,61 @@ entry: define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 %theta = load double, double* %1 %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 %theta = load double, double* %1 %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 %qb = load %Qubit*, %Qubit** %2 call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { -entry: - %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 -} - -define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void 
@Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: - call void @__quantum__rt__array_add_access(%Array* %ctls) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } @@ -705,7 +722,7 @@ entry: %res = alloca i1 store i1 false, i1* %res %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) - call void @__quantum__rt__array_add_access(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %1 = bitcast i8* %0 to %Qubit** %qb__inline__1 = load %Qubit*, %Qubit** %1 @@ -798,7 +815,7 @@ entry: %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__23, i64 0) %49 = bitcast i8* %48 to %Qubit** store %Qubit* %c, %Qubit** %49 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__23) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__23, i64 1) %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %51 = bitcast i8* %50 to %Qubit** %qb__inline__23 = load %Qubit*, %Qubit** %51 @@ -813,13 +830,13 @@ else__1: ; preds = %entry br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__23) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__23) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__23, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__23, i64 -1) %ctls__inline__24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__24, i64 0) %53 = bitcast i8* %52 to %Qubit** store %Qubit* %c, %Qubit** %53 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__24) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__24, i64 1) %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %55 = bitcast i8* %54 to %Qubit** %qb__inline__24 = load %Qubit*, %Qubit** %55 @@ -834,13 +851,13 @@ else__2: ; preds = %continue__1 br label %continue__2 continue__2: ; preds = %else__2, %then0__2 - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__24) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__24) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__24, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls__inline__24, i64 -1) %ctls__inline__25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__25, i64 0) %57 = bitcast i8* %56 to %Qubit** store %Qubit* %c, %Qubit** %57 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__25) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__25, i64 1) %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %59 = bitcast i8* %58 to %Qubit** %qb__inline__25 = load %Qubit*, %Qubit** %59 @@ -855,82 +872,82 @@ else__3: ; preds = %continue__2 br label %continue__3 continue__3: ; preds = %else__3, %then0__3 - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__25) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__25) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__25, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__25, i64 -1) %ctls__inline__26 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__26, i64 0) %61 = bitcast i8* %60 to %Qubit** store %Qubit* %c, %Qubit** %61 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__26) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__26, i64 1) %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %63 = bitcast i8* %62 to %Qubit** %qb__inline__26 = load %Qubit*, %Qubit** %63 call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__inline__26, %Qubit* %qb__inline__26) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__26) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__26) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__26, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__26, i64 -1) %ctls__inline__27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__27, i64 0) %65 = bitcast i8* %64 to %Qubit** store %Qubit* %c, %Qubit** %65 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__27) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__27, i64 1) %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %67 = bitcast i8* %66 to %Qubit** %qb__inline__27 = load %Qubit*, %Qubit** %67 call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__inline__27, %Qubit* %qb__inline__27) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__27) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__27) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__27, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__27, i64 -1) %ctls__inline__28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__28, i64 0) %69 = bitcast i8* %68 to %Qubit** store %Qubit* %c, %Qubit** %69 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__28) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__28, i64 1) %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %71 = bitcast i8* %70 to 
%Qubit** %qb__inline__28 = load %Qubit*, %Qubit** %71 call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__inline__28, %Qubit* %qb__inline__28) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__28) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__28) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__28, i64 -1) %ctls__inline__29 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__29, i64 0) %73 = bitcast i8* %72 to %Qubit** store %Qubit* %c, %Qubit** %73 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__29, i64 1) %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %75 = bitcast i8* %74 to %Qubit** %qb__inline__29 = load %Qubit*, %Qubit** %75 call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__inline__29, %Qubit* %qb__inline__29) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__29) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__29, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__29, i64 -1) %ctls__inline__30 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__30, i64 0) %77 = bitcast i8* %76 to %Qubit** store %Qubit* %c, %Qubit** %77 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__30) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 1) %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %79 = bitcast i8* %78 to %Qubit** %qb__inline__30 = load %Qubit*, %Qubit** %79 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__30) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 1) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__inline__30, %Qubit* %qb__inline__30) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__30) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__30) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__30) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__30, i64 -1) %ctls__inline__32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__32, i64 0) %81 = bitcast i8* %80 to %Qubit** store %Qubit* %c, %Qubit** %81 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__32) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 1) %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %83 = bitcast i8* %82 to %Qubit** %qb__inline__32 = load %Qubit*, %Qubit** %83 - call void @__quantum__rt__array_add_access(%Array* %ctls__inline__32) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 1) call void 
@__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__inline__32, %Qubit* %qb__inline__32) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__32) - call void @__quantum__rt__array_remove_access(%Array* %ctls__inline__32) - call void @__quantum__rt__array_unreference(%Array* %ctls__inline__32) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__32, i64 -1) call void @__quantum__rt__qubit_release(%Qubit* %c) %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) - call void @__quantum__rt__array_add_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %85 = bitcast i8* %84 to %Qubit** %qb__inline__34 = load %Qubit*, %Qubit** %85 @@ -947,8 +964,8 @@ else__4: ; preds = %continue__3 br label %continue__4 continue__4: ; preds = %else__4, %then0__4 - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %89 = bitcast i8* %88 to %Qubit** %qb__inline__35 = load %Qubit*, %Qubit** %89 @@ -964,8 +981,8 @@ else__5: ; preds = %continue__4 br label %continue__5 continue__5: ; preds = %else__5, %then0__5 - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %92 = bitcast i8* %91 to %Qubit** %qb__inline__36 = load %Qubit*, %Qubit** %92 @@ -981,50 +998,50 @@ else__6: ; preds = %continue__5 br label %continue__6 continue__6: ; preds = %else__6, %then0__6 - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %95 = bitcast i8* %94 to %Qubit** %qb__inline__37 = load %Qubit*, %Qubit** %95 call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__inline__37) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %97 = bitcast i8* %96 to %Qubit** %qb__inline__38 = load %Qubit*, %Qubit** %97 call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__inline__38) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 
-1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %99 = bitcast i8* %98 to %Qubit** %qb__inline__39 = load %Qubit*, %Qubit** %99 call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__inline__39) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %101 = bitcast i8* %100 to %Qubit** %qb__inline__40 = load %Qubit*, %Qubit** %101 call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__inline__40) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %103 = bitcast i8* %102 to %Qubit** %qb__inline__41 = load %Qubit*, %Qubit** %103 - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__inline__41) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %105 = bitcast i8* %104 to %Qubit** %qb__inline__43 = load %Qubit*, %Qubit** %105 - call void @__quantum__rt__array_add_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__inline__43) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_remove_access(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__qubit_release_array(%Array* %cc) - call void @__quantum__rt__array_remove_access(%Array* %cc) - call void @__quantum__rt__array_unreference(%Array* %cc) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %107 = bitcast i8* %106 to %Qubit** %qb__inline__45 = load %Qubit*, %Qubit** %107 @@ -1042,7 +1059,7 @@ continue__6: ; preds = %else__6, %then0__6 %117 = load %Qubit*, %Qubit** %116 store %Qubit* %114, %Qubit** %109 store %Qubit* %117, %Qubit** %111 - call void @__quantum__rt__array_add_access(%Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) %paulis__inline__47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) %119 = 
bitcast i8* %118 to i2* @@ -1052,35 +1069,34 @@ continue__6: ; preds = %else__6, %then0__6 %123 = load i2, i2* @PauliX store i2 %122, i2* %119 store i2 %123, i2* %121 - call void @__quantum__rt__array_add_access(%Array* %paulis__inline__47) - call void @__quantum__rt__array_add_access(%Array* %qs12) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis__inline__47, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) %124 = load %Result*, %Result** @ResultOne %res__inline__47 = alloca %Result* store %Result* %124, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %124, i64 1) %haveY__inline__47 = alloca i1 store i1 false, i1* %haveY__inline__47 br label %header__1 header__1: ; preds = %exiting__1, %continue__6 - %i__inline__47 = phi i64 [ 0, %continue__6 ], [ %139, %exiting__1 ] - %125 = icmp sge i64 %i__inline__47, 1 - %126 = icmp sle i64 %i__inline__47, 1 - %127 = select i1 true, i1 %126, i1 %125 - br i1 %127, label %body__1, label %exit__1 + %i__inline__47 = phi i64 [ 0, %continue__6 ], [ %137, %exiting__1 ] + %125 = icmp sle i64 %i__inline__47, 1 + br i1 %125, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %128 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) - %129 = bitcast i8* %128 to i2* - %130 = load i2, i2* %129 - %131 = load i2, i2* @PauliY - %132 = icmp eq i2 %130, %131 - %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) - %134 = bitcast i8* %133 to i2* - %135 = load i2, i2* %134 - %136 = load i2, i2* @PauliI - %137 = icmp eq i2 %135, %136 - %138 = or i1 %132, %137 - br i1 %138, label %then0__7, label %continue__7 + %126 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) + %127 = bitcast i8* %126 to i2* + %128 = load i2, i2* %127 + %129 = load i2, i2* @PauliY + %130 = icmp eq i2 %128, %129 + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) + %132 = bitcast i8* %131 to i2* + %133 = load i2, i2* %132 + %134 = load i2, i2* @PauliI + %135 = icmp eq i2 %133, %134 + %136 = or i1 %130, %135 + br i1 %136, label %then0__7, label %continue__7 then0__7: ; preds = %body__1 store i1 true, i1* %haveY__inline__47 @@ -1090,46 +1106,54 @@ continue__7: ; preds = %then0__7, %body__1 br label %exiting__1 exiting__1: ; preds = %continue__7 - %139 = add i64 %i__inline__47, 1 + %137 = add i64 %i__inline__47, 1 br label %header__1 exit__1: ; preds = %header__1 - %140 = load i1, i1* %haveY__inline__47 - br i1 %140, label %then0__8, label %test1__1 + %138 = load i1, i1* %haveY__inline__47 + br i1 %138, label %then0__8, label %test1__1 then0__8: ; preds = %exit__1 - %141 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qs12) - store %Result* %141, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %141) + %139 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %139, i64 1) + store %Result* %139, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %139, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %124, i64 -1) br label %continue__8 test1__1: ; preds = %exit__1 br i1 false, label %then1__1, label %test2__1 then1__1: ; preds = %test1__1 - %142 = call %Result* 
@__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qs12) - store %Result* %142, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %142) + %140 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %140, i64 1) + %141 = load %Result*, %Result** %res__inline__47 + store %Result* %140, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %140, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %141, i64 -1) br label %continue__8 test2__1: ; preds = %test1__1 br i1 false, label %then2__1, label %test3__1 then2__1: ; preds = %test2__1 - %143 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %144 = bitcast i8* %143 to i2* - %145 = load i2, i2* %144 - %146 = load i2, i2* @PauliX - %147 = icmp eq i2 %145, %146 - br i1 %147, label %then0__9, label %else__7 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %143 = bitcast i8* %142 to i2* + %144 = load i2, i2* %143 + %145 = load i2, i2* @PauliX + %146 = icmp eq i2 %144, %145 + br i1 %146, label %then0__9, label %else__7 then0__9: ; preds = %then2__1 - %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %149 = bitcast i8* %148 to %Qubit** - %qb__inline__48 = load %Qubit*, %Qubit** %149 - %150 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__48) - store %Result* %150, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %150) + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %148 = bitcast i8* %147 to %Qubit** + %qb__inline__48 = load %Qubit*, %Qubit** %148 + %149 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__48) + call void @__quantum__rt__result_update_reference_count(%Result* %149, i64 1) + %150 = load %Result*, %Result** %res__inline__47 + store %Result* %149, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %149, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %150, i64 -1) br label %continue__9 else__7: ; preds = %then2__1 @@ -1137,106 +1161,120 @@ else__7: ; preds = %then2__1 %152 = bitcast i8* %151 to %Qubit** %qb__inline__49 = load %Qubit*, %Qubit** %152 %153 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__49) + call void @__quantum__rt__result_update_reference_count(%Result* %153, i64 1) + %154 = load %Result*, %Result** %res__inline__47 store %Result* %153, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %153) + call void @__quantum__rt__result_update_reference_count(%Result* %153, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %154, i64 -1) br label %continue__9 continue__9: ; preds = %else__7, %then0__9 br label %continue__8 test3__1: ; preds = %test2__1 - %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %155 = bitcast i8* %154 to i2* - %156 = load i2, i2* %155 - %157 = load i2, i2* @PauliX - %158 = icmp eq i2 %156, %157 - %159 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %160 = bitcast i8* %159 to i2* - %161 = load i2, i2* %160 - %162 = load i2, i2* @PauliX - %163 = icmp eq i2 %161, %162 - %164 = and i1 %158, 
%163 - br i1 %164, label %then3__1, label %test4__1 + %155 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %156 = bitcast i8* %155 to i2* + %157 = load i2, i2* %156 + %158 = load i2, i2* @PauliX + %159 = icmp eq i2 %157, %158 + %160 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %161 = bitcast i8* %160 to i2* + %162 = load i2, i2* %161 + %163 = load i2, i2* @PauliX + %164 = icmp eq i2 %162, %163 + %165 = and i1 %159, %164 + br i1 %165, label %then3__1, label %test4__1 then3__1: ; preds = %test3__1 - %165 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qs12) - store %Result* %165, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %165) + %166 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %166, i64 1) + %167 = load %Result*, %Result** %res__inline__47 + store %Result* %166, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %166, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %167, i64 -1) br label %continue__8 test4__1: ; preds = %test3__1 - %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %167 = bitcast i8* %166 to i2* - %168 = load i2, i2* %167 - %169 = load i2, i2* @PauliX - %170 = icmp eq i2 %168, %169 - %171 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %172 = bitcast i8* %171 to i2* - %173 = load i2, i2* %172 - %174 = load i2, i2* @PauliZ - %175 = icmp eq i2 %173, %174 - %176 = and i1 %170, %175 - br i1 %176, label %then4__1, label %test5__1 + %168 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %169 = bitcast i8* %168 to i2* + %170 = load i2, i2* %169 + %171 = load i2, i2* @PauliX + %172 = icmp eq i2 %170, %171 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %174 = bitcast i8* %173 to i2* + %175 = load i2, i2* %174 + %176 = load i2, i2* @PauliZ + %177 = icmp eq i2 %175, %176 + %178 = and i1 %172, %177 + br i1 %178, label %then4__1, label %test5__1 then4__1: ; preds = %test4__1 - %177 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qs12) - store %Result* %177, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %177) + %179 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %179, i64 1) + %180 = load %Result*, %Result** %res__inline__47 + store %Result* %179, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %179, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %180, i64 -1) br label %continue__8 test5__1: ; preds = %test4__1 - %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %179 = bitcast i8* %178 to i2* - %180 = load i2, i2* %179 - %181 = load i2, i2* @PauliZ - %182 = icmp eq i2 %180, %181 - %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %184 = bitcast i8* %183 to i2* - %185 = load i2, i2* %184 - %186 = load i2, i2* @PauliX - %187 = icmp eq i2 %185, %186 - %188 = and i1 %182, %187 - br i1 %188, label %then5__1, label %test6__1 + %181 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %182 = bitcast i8* %181 to i2* + %183 = load i2, i2* %182 + %184 = load i2, i2* @PauliZ + %185 = icmp eq i2 %183, %184 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %187 = bitcast i8* %186 to i2* + %188 = load i2, i2* %187 + %189 = load i2, i2* @PauliX + %190 = icmp eq i2 %188, %189 + %191 = and i1 %185, %190 + br i1 %191, label %then5__1, label %test6__1 then5__1: ; preds = %test5__1 - %189 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qs12) - store %Result* %189, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %189) + %192 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %192, i64 1) + %193 = load %Result*, %Result** %res__inline__47 + store %Result* %192, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %192, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %193, i64 -1) br label %continue__8 test6__1: ; preds = %test5__1 - %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %191 = bitcast i8* %190 to i2* - %192 = load i2, i2* %191 - %193 = load i2, i2* @PauliZ - %194 = icmp eq i2 %192, %193 - %195 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %196 = bitcast i8* %195 to i2* - %197 = load i2, i2* %196 - %198 = load i2, i2* @PauliZ - %199 = icmp eq i2 %197, %198 - %200 = and i1 %194, %199 - br i1 %200, label %then6__1, label %continue__8 + %194 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) + %195 = bitcast i8* %194 to i2* + %196 = load i2, i2* %195 + %197 = load i2, i2* @PauliZ + %198 = icmp eq i2 %196, %197 + %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) + %200 = bitcast i8* %199 to i2* + %201 = load i2, i2* %200 + %202 = load i2, i2* @PauliZ + %203 = icmp eq i2 %201, %202 + %204 = and i1 %198, %203 + br i1 %204, label %then6__1, label %continue__8 then6__1: ; preds = %test6__1 - %201 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qs12) - store %Result* %201, %Result** %res__inline__47 - call void @__quantum__rt__result_unreference(%Result* %201) + %205 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %205, i64 1) + %206 = load %Result*, %Result** %res__inline__47 + store %Result* %205, %Result** %res__inline__47 + call void @__quantum__rt__result_update_reference_count(%Result* %205, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %206, i64 -1) br label %continue__8 continue__8: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__9, %then1__1, %then0__8 %r12 = load %Result*, %Result** %res__inline__47 - call void @__quantum__rt__result_reference(%Result* %r12) - call void @__quantum__rt__array_remove_access(%Array* %paulis__inline__47) - call void @__quantum__rt__array_remove_access(%Array* %qs12) - call void @__quantum__rt__array_unreference(%Array* %paulis__inline__47) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis__inline__47, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %paulis__inline__47, i64 -1)
     call void @__quantum__rt__qubit_release_array(%Array* %qs)
-    call void @__quantum__rt__array_remove_access(%Array* %qs)
-    call void @__quantum__rt__array_remove_access(%Array* %qs12)
-    call void @__quantum__rt__array_unreference(%Array* %qs)
-    call void @__quantum__rt__result_unreference(%Result* %r0)
-    call void @__quantum__rt__array_unreference(%Array* %qs12)
-    call void @__quantum__rt__result_unreference(%Result* %r12)
+    call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1)
+    call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1)
+    call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1)
+    call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1)
+    call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1)
+    call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1)
   ret i1 true
 }

diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp
index 3ec12382cee..bcbdb15d06c 100644
--- a/src/QirRuntime/test/unittests/TracerTests.cpp
+++ b/src/QirRuntime/test/unittests/TracerTests.cpp
@@ -1,6 +1,10 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 
+#include <fstream>
+#include <sstream>
+#include <unordered_map>
+
 #include "catch.hpp"
 
 #include "CoreTypes.hpp"
@@ -11,8 +15,7 @@ using namespace Microsoft::Quantum;
 
 TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[tracer]")
 {
-    shared_ptr<CTracer> tr = CreateTracer();
-    tr->SetPreferredLayerDuration(3);
+    shared_ptr<CTracer> tr = CreateTracer(3 /*layer duration*/);
 
     Qubit q1 = tr->AllocateQubit();
     Qubit q2 = tr->AllocateQubit();
@@ -42,8 +45,7 @@ TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[t
 
 TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]")
 {
-    shared_ptr<CTracer> tr = CreateTracer();
-    tr->SetPreferredLayerDuration(3);
+    shared_ptr<CTracer> tr = CreateTracer(3 /*layer duration*/);
 
     Qubit q1 = tr->AllocateQubit();
     Qubit q2 = tr->AllocateQubit();
@@ -63,8 +65,7 @@ TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]")
 
 TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]")
 {
-    shared_ptr<CTracer> tr = CreateTracer();
-    tr->SetPreferredLayerDuration(3);
+    shared_ptr<CTracer> tr = CreateTracer(3 /*layer duration*/);
 
     Qubit q1 = tr->AllocateQubit();
     Qubit q2 = tr->AllocateQubit();
@@ -120,8 +121,7 @@ TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]")
 // TODO: add multi-qubit ops
 TEST_CASE("Operations with same id are counted together", "[tracer]")
 {
-    shared_ptr<CTracer> tr = CreateTracer();
-    tr->SetPreferredLayerDuration(3);
+    shared_ptr<CTracer> tr = CreateTracer(3 /*layer duration*/);
 
     Qubit q1 = tr->AllocateQubit();
     Qubit q2 = tr->AllocateQubit();
@@ -146,8 +146,7 @@ TEST_CASE("Operations with same id are counted together", "[tracer]")
 
 TEST_CASE("Global barrier", "[tracer]")
 {
-    shared_ptr<CTracer> tr = CreateTracer();
-    tr->SetPreferredLayerDuration(2);
+    shared_ptr<CTracer> tr = CreateTracer(2 /*layer duration*/);
 
     Qubit q1 = tr->AllocateQubit();
     Qubit q2 = tr->AllocateQubit();
@@ -199,8 +198,7 @@
 // For layering purposes, measurements behave pretty much the same as other operations
 TEST_CASE("Layering measurements", "[tracer]")
 {
-    shared_ptr<CTracer> tr = CreateTracer();
-    tr->SetPreferredLayerDuration(1);
+    shared_ptr<CTracer> tr = CreateTracer(1 /*layer duration*/);
 
     Qubit q1 = tr->AllocateQubit();
     Qubit q2 = tr->AllocateQubit();
@@ -221,4 +219,87 @@ TEST_CASE("Layering measurements", "[tracer]")
     CHECK(layers[0].operations.size() == 3);
     CHECK(layers[1].operations.size() == 2);
     CHECK(layers[2].operations.size() == 1);
+}
+
+TEST_CASE("Output: to string", "[tracer]")
+{
+    std::unordered_map opNames = {{1, "X"}, {2, "Y"}, {3, "Z"}, {4, "b"}};
+    shared_ptr<CTracer> tr = CreateTracer(1 /*layer duration*/, opNames);
+
+    Qubit q1 = tr->AllocateQubit();
+    tr->TraceSingleQubitOp(3, 1, q1);
+    tr->TraceSingleQubitOp(5, 1, q1);
+    tr->InjectGlobalBarrier(4, 2);
+    tr->TraceSingleQubitOp(3, 4, q1);
+    tr->TraceSingleQubitOp(2, 1, q1);
+
+    {
+        std::stringstream out;
+        tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/);
+        std::string metrics = out.str();
+
+        std::stringstream expected;
+        expected << "layer_id,name,Y,Z,5" << std::endl;
+        expected << "0,,0,1,0" << std::endl;
+        expected << "1,,0,0,1" << std::endl;
+        expected << "2,b,0,0,0" << std::endl;
+        expected << "4,,0,1,0" << std::endl;
+        expected << "8,,1,0,0" << std::endl;
+
+        INFO(metrics);
+        CHECK(metrics == expected.str());
+    }
+
+    {
+        std::stringstream out;
+        tr->PrintLayerMetrics(out, ",", false /*printZeroMetrics*/);
+        std::string metrics = out.str();
+
+        std::stringstream expected;
+        expected << "layer_id,name,Y,Z,5" << std::endl;
+        expected << "0,,,1," << std::endl;
+        expected << "1,,,,1" << std::endl;
+        expected << "2,b,,," << std::endl;
+        expected << "4,,,1," << std::endl;
+        expected << "8,,1,," << std::endl;
+
+        INFO(metrics);
+        CHECK(metrics == expected.str());
+    }
+}
+
+TEST_CASE("Output: to file", "[tracer]")
+{
+    std::unordered_map opNames = {{1, "X"}, {2, "Y"}, {3, "Z"}, {4, "b"}};
+    shared_ptr<CTracer> tr = CreateTracer(1 /*layer duration*/, opNames);
+
+    Qubit q1 = tr->AllocateQubit();
+    tr->TraceSingleQubitOp(3, 1, q1);
+    tr->TraceSingleQubitOp(5, 1, q1);
+    tr->InjectGlobalBarrier(4, 2);
+    tr->TraceSingleQubitOp(3, 4, q1);
+    tr->TraceSingleQubitOp(2, 1, q1);
+
+    const std::string fileName = "tracer-test.txt";
+    std::ofstream out;
+    out.open(fileName);
+    tr->PrintLayerMetrics(out, "\t", false /*printZeroMetrics*/);
+    out.close();
+
+    std::ifstream in(fileName);
+    string line;
+    REQUIRE(in.is_open());
+    std::string metrics(std::istreambuf_iterator<char>{in}, {});
+    in.close();
+
+    std::stringstream expected;
+    expected << "layer_id\tname\tY\tZ\t5" << std::endl;
+    expected << "0\t\t\t1\t" << std::endl;
+    expected << "1\t\t\t\t1" << std::endl;
+    expected << "2\tb\t\t\t" << std::endl;
+    expected << "4\t\t\t1\t" << std::endl;
+    expected << "8\t\t1\t\t" << std::endl;
+
+    INFO(metrics);
+    CHECK(metrics == expected.str());
 }
\ No newline at end of file

From 75348376a027980858991ba262ebc25d921968b4 Mon Sep 17 00:00:00 2001
From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com>
Date: Fri, 5 Feb 2021 17:09:57 -0800
Subject: [PATCH 17/27] Fixed bug in the tracer's tests

---
 src/QirRuntime/lib/Tracer/tracer.hpp                 | 9 ++++++---
 src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp | 2 +-
 src/QirRuntime/test/unittests/TracerTests.cpp        | 8 ++++----
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp
index c060229a9f1..25cea950869 100644
--- a/src/QirRuntime/lib/Tracer/tracer.hpp
+++ b/src/QirRuntime/lib/Tracer/tracer.hpp
@@ -60,6 +60,9 @@ namespace Quantum
         Time lastUsedTime = 0;
 
         std::vector pendingZeroOps;
+
+        // For now assume that only one kind of frame can be tracked.
+        bool isFrameOpen = false;
     };
 
 /*==================================================================================================================
@@ -67,10 +70,10 @@ namespace Quantum
 ==================================================================================================================*/
     class CTracer : public ISimulator
     {
-        // Start with no reuse of qubits.
+        // For now the tracer assumes no reuse of qubits.
        std::vector qubits;
 
-        // The preferred duration of a layer.
+        // The preferred duration of a layer. An operation with a longer duration will make the containing layer longer.
         const int preferredLayerDuration = 0;
 
         // The index into the vector is treated as implicit id of the layer.
@@ -84,7 +87,7 @@
         // operation ids.
         std::unordered_map opNames;
 
-        // Operations we've seen so far (to be able to trim output to include only these)
+        // Operations we've seen so far (to be able to trim the output to include only those that were encountered).
         std::unordered_set seenOps;
 
     private:
diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
index 4a0f358d23a..084b4fd9f69 100644
--- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
+++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp
@@ -17,7 +17,7 @@ using namespace Microsoft::Quantum;
 
 namespace TracerUser
 {
-TEST_CASE("Test that we are building the new components correctly", "[qir-tracer]")
+TEST_CASE("Invoke each operator from Q# core once", "[qir-tracer]")
 {
     shared_ptr<CTracer> tr = CreateTracer(1 /*layer duration*/, g_operationNames);
     QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/);
diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp
index bcbdb15d06c..eb0eeb77fa0 100644
--- a/src/QirRuntime/test/unittests/TracerTests.cpp
+++ b/src/QirRuntime/test/unittests/TracerTests.cpp
@@ -95,13 +95,13 @@ TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]")
     // should fall through to the first layer
     Qubit qs46[2] = {q4, q6};
 
-    CHECK(1 == tr->TraceMultiQubitOp(10, 1, 3 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q5 /*second*/));
-    // because of q4, should be added into the second layer
+    CHECK(1 == tr->TraceMultiQubitOp(10, 1, 2 /*nFirst*/, qs46 /*first*/, 1 /*nSecond*/, &q5 /*second*/));
+    // because of the controls, should be added into the second layer
 
     const vector<Layer>& layers = tr->UseLayers();
     REQUIRE(layers.size() == 2);
 
-    REQUIRE(layers[0].operations.size() == 5);
+    CHECK(layers[0].operations.size() == 5);
     const auto& ops0 = layers[0].operations;
     CHECK(ops0.find(1) != ops0.end());
     CHECK(ops0.find(2) != ops0.end());
@@ -302,4 +302,4 @@ TEST_CASE("Output: to file", "[tracer]")
 
     INFO(metrics);
     CHECK(metrics == expected.str());
-}
\ No newline at end of file
+}

From 320a0e14d37703accae483e32cdcba30f9b80432 Mon Sep 17 00:00:00 2001
From: Irina Yatsenko <36858951+irinayat-MS@users.noreply.github.com>
Date: Tue, 9 Feb 2021 10:11:15 -0800
Subject: [PATCH 18/27] trc->qis, test that measure comparison fails right now

---
 src/QirRuntime/CMakeLists.txt                 |    4 +-
 src/QirRuntime/lib/Tracer/CMakeLists.txt      |    6 +-
 src/QirRuntime/lib/Tracer/README.md           |   34 +-
 .../{bridge-trc.ll => tracer-bridge.ll}       |   42 +-
 .../Tracer/{tracer-trc.cpp => tracer-qis.cpp} |   20 +-
 src/QirRuntime/test/QIR-tracer/generate.py    |    4 +-
 .../test/QIR-tracer/qir-tracer-driver.cpp     |   16 +-
 .../test/QIR-tracer/tracer-config.hpp         |    3 +-
 .../test/QIR-tracer/tracer-intrinsics.qs      |   26 +-
 .../test/QIR-tracer/tracer-measurements.qs    |   35 +
 src/QirRuntime/test/QIR-tracer/tracer-qir.ll
| 1313 ++++++++++------- .../test/QIR-tracer/tracer-target.qs | 14 +- 12 files changed, 860 insertions(+), 657 deletions(-) rename src/QirRuntime/lib/Tracer/{bridge-trc.ll => tracer-bridge.ll} (55%) rename src/QirRuntime/lib/Tracer/{tracer-trc.cpp => tracer-qis.cpp} (66%) create mode 100644 src/QirRuntime/test/QIR-tracer/tracer-measurements.qs diff --git a/src/QirRuntime/CMakeLists.txt b/src/QirRuntime/CMakeLists.txt index d109665c0db..63221cd4d5b 100644 --- a/src/QirRuntime/CMakeLists.txt +++ b/src/QirRuntime/CMakeLists.txt @@ -114,11 +114,11 @@ endmacro(compile_from_qir) if (WIN32) set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-rt-u.lib") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-qis-u.lib") - set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/bridge-trc-u.lib") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/tracer-bridge-u.lib") else() set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-rt-u.a") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-qis-u.a") - set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/libbridge-trc-u.a") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/libtracer-bridge-u.a") endif() add_subdirectory(lib) diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt index d79db1802ec..78b5adf145b 100644 --- a/src/QirRuntime/lib/Tracer/CMakeLists.txt +++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt @@ -1,11 +1,11 @@ # build the utility lib for tracer's bridge -compile_from_qir(bridge-trc bridge_trc) +compile_from_qir(tracer-bridge tracer-bridge) # build the native part of the tracer set(component_name "tracer") set(source_files - "tracer-trc.cpp" + "tracer-qis.cpp" "tracer.cpp" ) @@ -17,4 +17,4 @@ set(includes add_library(${component_name} STATIC ${source_files}) target_include_directories(${component_name} PUBLIC ${includes}) -add_dependencies(${component_name} bridge_trc) +add_dependencies(${component_name} tracer-bridge) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 50847e74174..d360299243b 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -6,7 +6,7 @@ The purpose of the Resource Tracer is to provide efficient and flexible way to e In addition to the standard QIR runtime functions, the quantum program will have to: -1. convert _all_ used intrinsic operations into one of the _trc_ operations supported by the tracer (see the list below); +1. convert _all_ used intrinsic operations into one of the _qis_ operations supported by the tracer (see the list below); 1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates would cover only one branch of the execution); 1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated @@ -20,29 +20,29 @@ The last provisions The Resource Tracer will consist of: -1. the bridge for the `__quantum__trc__*` extension methods; +1. the bridge for the `__quantum__qis__*` extension methods; 2. the native implementation to back the extensions; 3. the logic for partitioning gates into layers; 4. the logic for frame tracking; 5. output of the collected statistics; 6. (_lower priority_) the scheduling component to optimize depth and/or width of the circuit. 
-## List of `__quantum__trc__*` methods ##
+## List of `__quantum__qis__*` methods ##
 
 ___WIP___
 
 | Signature                                              | Description                                                   |
 | :----------------------------------------------------- | :------------------------------------------------------------ |
-| `void __quantum__trc__inject_global_barrier(i32 %id, i32 %duration)` | Function to insert a global barrier. It will be inserted into QIR based on a user defined intrinsic. See [Layering](#layering) section for details. |
-| `void __quantum__trc__on_operation_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) and option to cache estimates for a module, executed multiple times. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. |
-| `void __quantum__trc__on_operation_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. |
-| `void __quantum__trc__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation, as assigned by the client. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is duration to be assigned to the particular invocation of the operation. |
-| `void __quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits.|
-| `void __quantum__trc__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit. |
-| `void __quantum__trc__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. |
-| `%Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user might assign different operation ids for different measurement bases. |
-| `%Result* @__quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user might assign different operation ids for different measurement bases. |
-| `void __quantum__trc__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. |
+| `void __quantum__qis__inject_global_barrier(i32 %id, i32 %duration)` | Function to insert a global barrier. It will be inserted into QIR based on a user defined intrinsic. See the [Layering](#layering) section for details. |
+| `void __quantum__qis__on_operation_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) an option to cache estimates for a module executed multiple times. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. |
+| `void __quantum__qis__on_operation_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. |
+| `void __quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation, as assigned by the client. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is the duration to be assigned to the particular invocation of the operation. |
+| `void __quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits. |
+| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with a single target qubit. |
+| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. |
+| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user might assign different operation ids for different measurement bases. |
+| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user might assign different operation ids for different measurement bases. |
+| `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. |
 
 _Note on operation ids_: The client is responsible for using operation ids in a consistent manner. Operations with the
 same id will be counted by the tracer as the _same_ operation, even across invocations with different numbers of target
@@ -55,7 +55,7 @@ The Resource Tracer will reuse qir-rt library as much as possible while extendin
 
 __Conditionals on measurements__: The Resource Tracer will execute LLVM IR's branching structures "as is", depending
 on the values of the corresponding variables at runtime. To enable estimation of branches that depend on a measurement
 result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the
-  conditionals into `__quantum__trc__apply_if*` calls. The tracer will add operations from _both branches_ into the
+  conditionals into `__quantum__qis__apply_if*` calls. The tracer will add operations from _both branches_ into the
 layers it creates to compute the upper bound estimate. Nested conditionals, conditional measurements and conditional
 tracked operations will _not_ be supported.
 
@@ -92,7 +92,7 @@ _Definition_: A ___global barrier___ is any operation that acts on _all_ current
 for the clients to inject global barriers equivalent to an identity operator. This will allow the clients to enforce
 a particular layering structure (because no later operation can sink below the barrier).
 
-The global barriers will be implemented as calls to `__quantum__trc__global_barrier` function. No additional support from
+The global barriers will be implemented as calls to the `__quantum__qis__global_barrier` function. No additional support from
 the compiler should be needed, as the user can define their own intrinsic to represent the barrier and map it to the
 above runtime function via the targets.qs file.
 The user can choose the duration of a barrier, which will affect the start time of the following layers, but no operations
 will be added to a barrier, regardless of its width.
 
@@ -168,8 +168,10 @@ The tracer will have options to output the estimates into command line or into a
 - Zero counts for any of the statistics _might_ be replaced with empty string.
 - The global barrier layer lists the name and no statistics.
 
-__TBD__: specify the header that maps operation ids to gate names
+The map of operation ids to names can be passed to the tracer's constructor as a `std::unordered_map`.
+The mapping can be partial; ids will be used in the output for unnamed operations.
 
 ## Depth vs width optimizations ##
 
 TBD but lower priority.
+
diff --git a/src/QirRuntime/lib/Tracer/bridge-trc.ll b/src/QirRuntime/lib/Tracer/tracer-bridge.ll
similarity index 55%
rename from src/QirRuntime/lib/Tracer/bridge-trc.ll
rename to src/QirRuntime/lib/Tracer/tracer-bridge.ll
index 43a40dbe934..6754646d82d 100644
--- a/src/QirRuntime/lib/Tracer/bridge-trc.ll
+++ b/src/QirRuntime/lib/Tracer/tracer-bridge.ll
@@ -21,65 +21,65 @@
 ; declarations of the native methods this bridge delegates to
 ;
-declare void @quantum__trc__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT*)
-declare void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*)
-declare void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*)
-declare void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*)
-declare void @quantum__trc__inject_global_barrier(i32 %id, i32 %duration)
-declare %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT*)
-declare %class.RESULT* @quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %struct.QirArray*)
+declare void @quantum__qis__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT*)
+declare void @quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*)
+declare void @quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*)
+declare void @quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*)
+declare void @quantum__qis__inject_global_barrier(i32 %id, i32 %duration)
+declare %class.RESULT* @quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT*)
+declare %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray*)
 
 ;===============================================================================
 ; quantum__trc namespace implementations
 ;
-define void @__quantum__trc__single_qubit_op(i32 %id, i32 %duration, %Qubit* %.q)
+define void @__quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %.q)
 {
   %q = bitcast %Qubit* %.q to %class.QUBIT*
-  call void @quantum__trc__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT* %q)
+  call void @quantum__qis__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT* %q)
   ret void
 }
 
-define void @__quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Qubit* %.q)
+define void @__quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Qubit* %.q)
 {
   %q = bitcast %Qubit* %.q to %class.QUBIT*
   %ctls = bitcast %Array* %.ctls to %struct.QirArray*
-  call void @quantum__trc__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %class.QUBIT* %q)
+  call void @quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %class.QUBIT* %q)
   ret void
 }
 
-define
void @__quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %Array* %.qs) +define void @__quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %.qs) { %qs = bitcast %Array* %.qs to %struct.QirArray* - call void @quantum__trc__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray* %qs) + call void @quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray* %qs) ret void } -define void @__quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Array* %.qs) +define void @__quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Array* %.qs) { %ctls = bitcast %Array* %.ctls to %struct.QirArray* %qs = bitcast %Array* %.qs to %struct.QirArray* - call void @quantum__trc__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %struct.QirArray* %qs) + call void @quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %struct.QirArray* %qs) ret void } -define void @__quantum__trc__inject_global_barrier(i32 %id, i32 %duration) +define void @__quantum__qis__inject_global_barrier(i32 %id, i32 %duration) { - call void @quantum__trc__inject_global_barrier(i32 %id, i32 %duration) + call void @quantum__qis__inject_global_barrier(i32 %id, i32 %duration) ret void } -define %Result* @__quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %.q) +define %Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %.q) { %q = bitcast %Qubit* %.q to %class.QUBIT* - %r = call %class.RESULT* @quantum__trc__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %q) + %r = call %class.RESULT* @quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %q) %.r = bitcast %class.RESULT* %r to %Result* ret %Result* %.r } -define %Result* @__quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %Array* %.qs) +define %Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %.qs) { %qs = bitcast %Array* %.qs to %struct.QirArray* - %r = call %class.RESULT* @quantum__trc__multi_qubit_measure(i32 %id, i32 %duration, %struct.QirArray* %qs) + %r = call %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray* %qs) %.r = bitcast %class.RESULT* %r to %Result* ret %Result* %.r } \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer-trc.cpp b/src/QirRuntime/lib/Tracer/tracer-qis.cpp similarity index 66% rename from src/QirRuntime/lib/Tracer/tracer-trc.cpp rename to src/QirRuntime/lib/Tracer/tracer-qis.cpp index fb1c9bdf03a..47379deaa9a 100644 --- a/src/QirRuntime/lib/Tracer/tracer-trc.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-qis.cpp @@ -18,48 +18,48 @@ namespace Quantum using namespace Microsoft::Quantum; extern "C" { - void quantum__trc__on_operation_start(int64_t id) // NOLINT + void quantum__qis__on_operation_start(int64_t id) // NOLINT { } - void quantum__trc__on_operation_end(int64_t id) // NOLINT + void quantum__qis__on_operation_end(int64_t id) // NOLINT { } - void quantum__trc__swap(Qubit q1, Qubit q2) // NOLINT + void quantum__qis__swap(Qubit q1, Qubit q2) // NOLINT { } - void quantum__trc__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT + void quantum__qis__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT { (void)tracer->TraceSingleQubitOp(id, duration, target); } - void quantum__trc__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, Qubit target) // NOLINT + void quantum__qis__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, Qubit 
target) // NOLINT { (void)tracer->TraceMultiQubitOp(id, duration, ctls->count, reinterpret_cast(ctls->buffer), 1, &target); } - void quantum__trc__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT + void quantum__qis__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT { (void)tracer->TraceMultiQubitOp( id, duration, 0, nullptr, targets->count, reinterpret_cast(targets->buffer)); } - void quantum__trc__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, QirArray* targets) // NOLINT + void quantum__qis__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, QirArray* targets) // NOLINT { (void)tracer->TraceMultiQubitOp( id, duration, ctls->count, reinterpret_cast(ctls->buffer), targets->count, reinterpret_cast(targets->buffer)); } - void quantum__trc__inject_global_barrier(int32_t id, int32_t duration) // NOLINT + void quantum__qis__inject_global_barrier(int32_t id, int32_t duration) // NOLINT { (void)tracer->InjectGlobalBarrier(id, duration); } - RESULT* quantum__trc__single_qubit_measure(int32_t id, int32_t duration, QUBIT* q) // NOLINT + RESULT* quantum__qis__single_qubit_measure(int32_t id, int32_t duration, QUBIT* q) // NOLINT { return tracer->TraceSingleQubitMeasurement(id, duration, q); } - RESULT* quantum__trc__multi_qubit_measure(int32_t id, int32_t duration, QirArray* qs) // NOLINT + RESULT* quantum__qis__joint_measure(int32_t id, int32_t duration, QirArray* qs) // NOLINT { return tracer->TraceMultiQubitMeasurement(id, duration, qs->count, reinterpret_cast(qs->buffer)); } diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py index bf5a716b017..6f109cbfca9 100644 --- a/src/QirRuntime/test/QIR-tracer/generate.py +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -31,7 +31,9 @@ def log(message): if ext == ".qs": files_to_process = files_to_process + " " + file -command = (qsc + " build --qir s --build-exe --input " + files_to_process + " --proj " + output_file) +# Compile as a lib so that all functions are retained and we don't have to work around the current +# limitations of the @EntryPoint attribute. +command = (qsc + " build --qir s --input " + files_to_process + " --proj " + output_file) log("Executing: " + command) subprocess.run(command, shell = True) diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index 084b4fd9f69..402ec01aa5b 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -22,12 +22,12 @@ TEST_CASE("Invoke each operator from Q# core once", "[qir-tracer]") shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); - REQUIRE(Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body()); + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body()); vector layers = tr->UseLayers(); - // AllIntrinsics happens to produce 25 layers right now and we are not checking whether that's expected -- as + // TestCoreIntrinsics happens to produce 24 layers right now and we are not checking whether that's expected -- as // testing of layering logic is better done by unit tests. 
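To make the driver pattern above concrete, here is a minimal sketch of a standalone program driving the tracer with a *partial* id-to-name map, assembled from the `CreateTracer`, `QirContextScope`, and `UseLayers` calls visible in this diff. The `CTracer` type name, the include paths, and the `std::unordered_map<int, std::string>` template arguments are assumptions for illustration (the design doc above only fixes the overall map shape); ids missing from the map are expected to appear in the output as bare numbers.

```cpp
// Hypothetical standalone tracer driver; header names and CTracer are assumptions.
#include <memory>
#include <string>
#include <unordered_map>

#include "tracer.hpp"      // CreateTracer, CTracer (assumed location)
#include "QirContext.hpp"  // QirContextScope (assumed location)

// Entry point exported by the QIR generated from the Q# test files.
extern "C" void Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body(); // NOLINT

int main()
{
    using namespace Microsoft::Quantum;

    // Partial map: any op id without an entry is reported by its numeric id.
    std::unordered_map<int, std::string> opNames = {{0, "X"}, {1, "CX"}, {9, "H"}};

    std::shared_ptr<CTracer> tracer = CreateTracer(1 /*layer duration*/, opNames);
    QirContextScope qirctx(tracer.get(), false /*trackAllocatedObjects*/);

    // Running the program records gates into layers instead of simulating them.
    Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body();

    // One element per layer the tracer laid down; the test below pins this at 24.
    return tracer->UseLayers().size() == 24 ? 0 : 1;
}
```

The Catch2 test that follows exercises exactly this sequence, wrapping the generated entry point in `REQUIRE_NOTHROW` and then checking the layer count.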
- CHECK(layers.size() == 25); + CHECK(layers.size() == 24); std::ofstream out; out.open("qir-tracer-test.txt"); @@ -35,4 +35,14 @@ out.close(); } +TEST_CASE("Measurements can be counted but cannot be compared", "[qir-tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); + QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); + + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(false /*compare*/)); + CHECK(tr->UseLayers().size() == 1); + + REQUIRE_THROWS(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(true /*compare*/)); +} } \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp index 66b9a747ae3..0587d876416 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp @@ -26,4 +26,5 @@ extern const std::unordered_map g_operati } // namespace TracerUser // Available functions in generated QIR -extern "C" bool Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body(); // NOLINT +extern "C" void Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body(); // NOLINT +extern "C" void Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(bool compare); // NOLINT diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs index 9764f154745..d4a92b4af57 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -5,19 +5,9 @@ namespace Microsoft.Quantum.Testing.Tracer { open Microsoft.Quantum.Intrinsic; - operation Fixup(qs : Qubit[]) : Unit + operation TestCoreIntrinsics() : Unit { - for (i in 0..Length(qs)-1) - { - X(qs[i]); - } - } - - @EntryPoint() - operation AllIntrinsics() : Bool - { - mutable res = false; - using (qs = Qubit[3]) + use qs = Qubit[3] { X(qs[0]); Y(qs[0]); @@ -45,7 +35,7 @@ namespace Microsoft.Quantum.Testing.Tracer Adjoint S(qs[1]); Adjoint T(qs[2]); - using (c = Qubit()) + use c = Qubit() { Controlled X([c], (qs[0])); Controlled Y([c], (qs[0])); @@ -59,7 +49,7 @@ namespace Microsoft.Quantum.Testing.Tracer Controlled T([c], (qs[2])); } - using (cc = Qubit[2]) + use cc = Qubit[2] { Controlled X(cc, (qs[0])); Controlled Y(cc, (qs[0])); @@ -72,14 +62,6 @@ namespace Microsoft.Quantum.Testing.Tracer Controlled S(cc, (qs[1])); Controlled T(cc, (qs[2])); } - - let r0 = M(qs[0]); - //ApplyIfZero(r0, (Z, qs[0])); - - let qs12 = [qs[1], qs[2]]; - let r12 = Measure([PauliY, PauliX], qs12); - //ApplyIfOne(r12, (Fixup, qs12)); } - return true; } } diff --git a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs new file mode 100644 index 00000000000..a46ce7907ab --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +namespace Microsoft.Quantum.Testing.Tracer +{ + open Microsoft.Quantum.Intrinsic; + + operation Fixup(qs : Qubit[]) : Unit + { + for i in 0..Length(qs)-1 + { + X(qs[i]); + } + } + + operation TestMeasurements(compare : Bool) : Unit + { + use qs = Qubit[3] + { + let r0 = M(qs[0]); + let qs12 = [qs[1], qs[2]]; + let r12 = Measure([PauliY, PauliX], qs12); + + if (compare) + { + if r0 == Zero + { + X(qs[1]); + } + + //ApplyIfOne(r12, (Fixup, qs12)); + } + } + } +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll index f14da190f2a..ea46d25e9a8 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -1,8 +1,10 @@ %Result = type opaque %Range = type { i64, i64, i64 } +%Tuple = type opaque %Qubit = type opaque %Array = type opaque +%String = type opaque @ResultZero = external global %Result* @ResultOne = external global %Result* @@ -12,132 +14,41 @@ @PauliZ = constant i2 -2 @EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@Microsoft__Quantum__Testing__Tracer__AllIntrinsics = alias i1 (), i1 ()* @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body - -define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb) - ret void -} - -declare void @__quantum__trc__single_qubit_op(i64, i64, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { entry: - call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb) - ret void + ret %Tuple* null } -define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void + ret %Tuple* null } -declare void @__quantum__rt__array_update_alias_count(%Array*, i64) - -declare void @__quantum__trc__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +define %Tuple* @Microsoft__Quantum__Core__Inline__body() { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { -entry: - %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 -} - -declare %Result* @__quantum__trc__single_qubit_measure(i64, i64, %Qubit*) - -define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { -entry: - call 
void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} - -declare i64 @__quantum__rt__array_get_size_1d(%Array*) - -define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 - -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 - -continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void + ret %Tuple* null } define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { entry: - %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) %1 = bitcast i8* %0 to %Qubit** store %Qubit* %control, %Qubit** %1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) br i1 true, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__1, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) ret void } @@ -145,63 +56,73 @@ declare %Array* @__quantum__rt__array_create_1d(i32, i64) declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) +declare void 
@__quantum__rt__array_update_alias_count(%Array*, i64) + +declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + declare void @__quantum__rt__array_update_reference_count(%Array*, i64) define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { entry: - %ctls__inline__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__1, i64 0) + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) %1 = bitcast i8* %0 to %Qubit** store %Qubit* %control, %Qubit** %1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) br i1 true, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__1, %Qubit* %target) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__1, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__1, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) - call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__single_qubit_measure(i64, i64, %Qubit*) + define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) @@ -251,7 +172,7 @@ exit__1: ; preds = %header__1 br i1 %16, label %then0__2, label %test1__1 then0__2: ; preds = %exit__1 - %17 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qubits) + %17 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qubits) call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 1) store %Result* %17, %Result** %res call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) @@ -263,7 +184,7 @@ test1__1: ; preds = %exit__1 br i1 %18, label %then1__1, label %test2__1 then1__1: ; preds = %test1__1 - %19 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qubits) + %19 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qubits) call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 1) %20 = load %Result*, %Result** %res store %Result* %19, %Result** %res @@ -286,8 +207,8 @@ then2__1: ; preds = %test2__1 then0__3: ; preds = %then2__1 %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) %28 = bitcast i8* %27 to %Qubit** - %qb__inline__1 = load %Qubit*, %Qubit** %28 - %29 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__1) + %qb = load %Qubit*, %Qubit** %28 + %29 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 1) %30 = load %Result*, %Result** %res store %Result* %29, %Result** %res @@ -298,8 +219,8 @@ then0__3: ; preds = %then2__1 else__1: ; preds = %then2__1 %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) %32 = bitcast i8* %31 to %Qubit** - %qb__inline__2 = load %Qubit*, %Qubit** %32 - %33 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__2) + %qb__1 = load %Qubit*, %Qubit** %32 + %33 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__1) call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) %34 = load %Result*, %Result** %res store %Result* %33, %Result** %res @@ -325,7 +246,7 @@ test3__1: ; preds = %test2__1 br i1 %45, label %then3__1, label %test4__1 then3__1: ; preds = %test3__1 - %46 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qubits) + %46 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qubits) call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 1) %47 = load %Result*, %Result** %res store %Result* %46, %Result** %res @@ -348,7 +269,7 @@ test4__1: ; preds = %test3__1 br i1 %58, label %then4__1, label %test5__1 then4__1: ; preds = %test4__1 - %59 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qubits) + %59 = call %Result* 
@__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qubits) call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 1) %60 = load %Result*, %Result** %res store %Result* %59, %Result** %res @@ -371,7 +292,7 @@ test5__1: ; preds = %test4__1 br i1 %71, label %then5__1, label %test6__1 then5__1: ; preds = %test5__1 - %72 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qubits) + %72 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qubits) call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 1) %73 = load %Result*, %Result** %res store %Result* %72, %Result** %res @@ -394,7 +315,7 @@ test6__1: ; preds = %test5__1 br i1 %84, label %then6__1, label %continue__2 then6__1: ; preds = %test6__1 - %85 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qubits) + %85 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qubits) call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 1) %86 = load %Result*, %Result** %res store %Result* %85, %Result** %res @@ -411,137 +332,259 @@ continue__2: ; preds = %then6__1, %test6__1 declare void @__quantum__rt__result_update_reference_count(%Result*, i64) -declare %Result* @__quantum__trc__multi_qubit_measure(i64, i64, %Array*) +declare i64 @__quantum__rt__array_get_size_1d(%Array*) -define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) + +define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mxx__body(%Array* %qubits) { entry: - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mxz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mzx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mzz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) ret void } 
-define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +define void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} -continue__1: ; preds = %else__1, %then0__1 +define void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) - %1 = icmp eq i64 %0, 1 - br i1 %1, label %then0__1, label %else__1 + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* 
%0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} -then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb) + ret void +} -else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) - br label %continue__1 +define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + ret void +} -continue__1: ; preds = %else__1, %then0__1 +define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sx__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sx__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sx__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sx__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { entry: - %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) - ret %Result* %0 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void } -define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { entry: - %0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) - ret %Result* %0 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void } define void 
@Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) ret void } @@ -549,7 +592,7 @@ define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void @@ -559,21 +602,77 @@ define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void 
@__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) ret void } define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) ret void } @@ -585,11 +684,11 @@ entry: br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 @@ -605,11 +704,11 @@ entry: br i1 %1, label %then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 @@ -617,128 +716,158 @@ continue__1: ; preds = %else__1, %then0__1 ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qb) { +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { entry: - call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) ret void } -define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) - ret void -} + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 -define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) - ret void -} +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* 
%ctls, %Qubit* %qb) + br label %continue__1 -define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { -entry: - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb) - ret void -} +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 -define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) +continue__1: ; preds = %else__1, %then0__1 call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) ret void } -define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +define void @Microsoft__Quantum__Testing__Tracer__Fixup__body(%Array* %qs) { entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) - %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 - %theta = load double, double* %1 - %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 - %qb = load %Qubit*, %Qubit** %2 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qs) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] + %2 = icmp sle i64 %i, %1 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %i) + %4 = bitcast i8* %3 to %Qubit** + %qb = load %Qubit*, %Qubit** %4 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %5 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) ret void } -define i1 @Microsoft__Quantum__Testing__Tracer__AllIntrinsics__body() #0 { +define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { entry: - %res = alloca i1 - store i1 false, i1* %res %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %1 = bitcast i8* %0 to %Qubit** - %qb__inline__1 = load %Qubit*, %Qubit** %1 - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__1) + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %3 = bitcast i8* %2 to %Qubit** - %qb__inline__2 = load %Qubit*, %Qubit** %3 - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb__inline__2) + %qb__1 = load %Qubit*, %Qubit** %3 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%qs, i64 1) %5 = bitcast i8* %4 to %Qubit** - %qb__inline__3 = load %Qubit*, %Qubit** %5 - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb__inline__3) + %qb__2 = load %Qubit*, %Qubit** %5 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %7 = bitcast i8* %6 to %Qubit** - %qb__inline__4 = load %Qubit*, %Qubit** %7 - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb__inline__4) + %qb__3 = load %Qubit*, %Qubit** %7 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %9 = bitcast i8* %8 to %Qubit** %10 = load %Qubit*, %Qubit** %9 @@ -748,41 +877,41 @@ entry: call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %15 = bitcast i8* %14 to %Qubit** - %qb__inline__5 = load %Qubit*, %Qubit** %15 - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb__inline__5) + %qb__4 = load %Qubit*, %Qubit** %15 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %17 = bitcast i8* %16 to %Qubit** - %qb__inline__6 = load %Qubit*, %Qubit** %17 - call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb__inline__6) + %qb__5 = load %Qubit*, %Qubit** %17 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %19 = bitcast i8* %18 to %Qubit** - %qb__inline__7 = load %Qubit*, %Qubit** %19 - call void @__quantum__trc__single_qubit_op(i64 23, i64 1, %Qubit* %qb__inline__7) + %qb__6 = load %Qubit*, %Qubit** %19 + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %21 = bitcast i8* %20 to %Qubit** - %qb__inline__8 = load %Qubit*, %Qubit** %21 - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb__inline__8) + %qb__7 = load %Qubit*, %Qubit** %21 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %23 = bitcast i8* %22 to %Qubit** - %qb__inline__10 = load %Qubit*, %Qubit** %23 - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb__inline__10) - call void @__quantum__trc__inject_global_barrier(i64 42, i64 1) + %qb__9 = load %Qubit*, %Qubit** %23 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) + call void @__quantum__qis__inject_global_barrier(i64 42, i64 1) %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %25 = bitcast i8* %24 to %Qubit** - %qb__inline__12 = load %Qubit*, %Qubit** %25 - call void @__quantum__trc__single_qubit_op(i64 0, i64 1, %Qubit* %qb__inline__12) + %qb__11 = load %Qubit*, %Qubit** %25 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %27 = bitcast i8* %26 to %Qubit** - %qb__inline__13 = load %Qubit*, %Qubit** %27 - call void @__quantum__trc__single_qubit_op(i64 3, i64 1, %Qubit* %qb__inline__13) + %qb__12 = load %Qubit*, %Qubit** %27 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__12) %28 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %29 = bitcast i8* %28 to %Qubit** - %qb__inline__14 = load %Qubit*, %Qubit** %29 - call void @__quantum__trc__single_qubit_op(i64 6, i64 1, %Qubit* %qb__inline__14) + %qb__13 = load %Qubit*, %Qubit** %29 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__13) %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %31 = bitcast i8* %30 to %Qubit** - %qb__inline__15 = load %Qubit*, %Qubit** %31 - call void @__quantum__trc__single_qubit_op(i64 9, i64 1, %Qubit* %qb__inline__15) + %qb__14 = load %Qubit*, %Qubit** %31 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__14) %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %33 = bitcast i8* %32 to %Qubit** %34 = load %Qubit*, %Qubit** %33 @@ -792,175 +921,175 @@ entry: call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %39 = bitcast i8* %38 to %Qubit** - %qb__inline__16 = load %Qubit*, %Qubit** %39 - call void @__quantum__trc__single_qubit_op(i64 19, i64 1, %Qubit* %qb__inline__16) + %qb__15 = load %Qubit*, %Qubit** %39 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__15) %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %41 = bitcast i8* %40 to %Qubit** - %qb__inline__17 = load %Qubit*, %Qubit** %41 - call void @__quantum__trc__single_qubit_op(i64 21, i64 1, %Qubit* %qb__inline__17) + %qb__16 = load %Qubit*, %Qubit** %41 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__16) %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %43 = bitcast i8* %42 to %Qubit** - %qb__inline__18 = load %Qubit*, %Qubit** %43 - call void @__quantum__trc__single_qubit_op(i64 24, i64 1, %Qubit* %qb__inline__18) + %qb__17 = load %Qubit*, %Qubit** %43 + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb__17) %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %45 = bitcast i8* %44 to %Qubit** - %qb__inline__19 = load %Qubit*, %Qubit** %45 - call void @__quantum__trc__single_qubit_op(i64 15, i64 1, %Qubit* %qb__inline__19) + %qb__18 = load %Qubit*, %Qubit** %45 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__18) %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %47 = bitcast i8* %46 to %Qubit** - %qb__inline__21 = load %Qubit*, %Qubit** %47 - call void @__quantum__trc__single_qubit_op(i64 11, i64 1, %Qubit* %qb__inline__21) + %qb__20 = load %Qubit*, %Qubit** %47 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__20) %c = call %Qubit* @__quantum__rt__qubit_allocate() - %ctls__inline__23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__23, i64 0) + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) %49 = bitcast i8* %48 to %Qubit** store %Qubit* %c, %Qubit** %49 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__23, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %51 = bitcast i8* %50 to %Qubit** - %qb__inline__23 = load %Qubit*, %Qubit** %51 + %qb__22 = load %Qubit*, %Qubit** %51 br i1 true, label 
%then0__1, label %else__1 then0__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls__inline__23, %Qubit* %qb__inline__23) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb__22) br label %continue__1 else__1: ; preds = %entry - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls__inline__23, %Qubit* %qb__inline__23) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__22) br label %continue__1 continue__1: ; preds = %else__1, %then0__1 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__23, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__23, i64 -1) - %ctls__inline__24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__24, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) %53 = bitcast i8* %52 to %Qubit** store %Qubit* %c, %Qubit** %53 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__24, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %55 = bitcast i8* %54 to %Qubit** - %qb__inline__24 = load %Qubit*, %Qubit** %55 + %qb__23 = load %Qubit*, %Qubit** %55 br i1 true, label %then0__2, label %else__2 then0__2: ; preds = %continue__1 - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__inline__24, %Qubit* %qb__inline__24) + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__23) br label %continue__2 else__2: ; preds = %continue__1 - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__inline__24, %Qubit* %qb__inline__24) + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__23) br label %continue__2 continue__2: ; preds = %else__2, %then0__2 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__24, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__24, i64 -1) - %ctls__inline__25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__25, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) + %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) %57 = bitcast i8* %56 to %Qubit** store %Qubit* %c, %Qubit** %57 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__25, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %59 = bitcast i8* %58 to %Qubit** - %qb__inline__25 = load %Qubit*, %Qubit** %59 + %qb__24 = load %Qubit*, %Qubit** %59 br i1 true, label %then0__3, label %else__3 then0__3: ; preds = %continue__2 - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* 
%ctls__inline__25, %Qubit* %qb__inline__25) + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__24) br label %continue__3 else__3: ; preds = %continue__2 - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__inline__25, %Qubit* %qb__inline__25) + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__24) br label %continue__3 continue__3: ; preds = %else__3, %then0__3 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__25, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__25, i64 -1) - %ctls__inline__26 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__26, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) + %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) %61 = bitcast i8* %60 to %Qubit** store %Qubit* %c, %Qubit** %61 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__26, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %63 = bitcast i8* %62 to %Qubit** - %qb__inline__26 = load %Qubit*, %Qubit** %63 - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__inline__26, %Qubit* %qb__inline__26) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__26, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__26, i64 -1) - %ctls__inline__27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__27, i64 0) + %qb__25 = load %Qubit*, %Qubit** %63 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__25) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) + %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) %65 = bitcast i8* %64 to %Qubit** store %Qubit* %c, %Qubit** %65 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__27, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %67 = bitcast i8* %66 to %Qubit** - %qb__inline__27 = load %Qubit*, %Qubit** %67 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__inline__27, %Qubit* %qb__inline__27) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__27, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__27, i64 -1) - %ctls__inline__28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__28, i64 0) + %qb__26 = load %Qubit*, %Qubit** %67 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__26) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) + %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) %69 = bitcast i8* %68 to %Qubit** store %Qubit* %c, %Qubit** %69 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__28, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %71 = bitcast i8* %70 to %Qubit** - %qb__inline__28 = load %Qubit*, %Qubit** %71 - call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__inline__28, %Qubit* %qb__inline__28) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__28, i64 -1) - %ctls__inline__29 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__29, i64 0) + %qb__27 = load %Qubit*, %Qubit** %71 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__27) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) + %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) %73 = bitcast i8* %72 to %Qubit** store %Qubit* %c, %Qubit** %73 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__29, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %75 = bitcast i8* %74 to %Qubit** - %qb__inline__29 = load %Qubit*, %Qubit** %75 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__inline__29, %Qubit* %qb__inline__29) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__29, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__29, i64 -1) - %ctls__inline__30 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__30, i64 0) + %qb__28 = load %Qubit*, %Qubit** %75 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__6, %Qubit* %qb__28) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) + %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) %77 = bitcast i8* %76 to %Qubit** store %Qubit* %c, %Qubit** %77 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %79 = bitcast i8* %78 to %Qubit** - %qb__inline__30 = load %Qubit*, %Qubit** %79 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__inline__30, %Qubit* %qb__inline__30) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 -1) - call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls__inline__30, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__30, i64 -1) - %ctls__inline__32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__inline__32, i64 0) + %qb__29 = load %Qubit*, %Qubit** %79 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) + %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) %81 = bitcast i8* %80 to %Qubit** store %Qubit* %c, %Qubit** %81 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %83 = bitcast i8* %82 to %Qubit** - %qb__inline__32 = load %Qubit*, %Qubit** %83 - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__inline__32, %Qubit* %qb__inline__32) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls__inline__32, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls__inline__32, i64 -1) + %qb__31 = load %Qubit*, %Qubit** %83 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__31) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) call void @__quantum__rt__qubit_release(%Qubit* %c) %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %85 = bitcast i8* %84 to %Qubit** - %qb__inline__34 = load %Qubit*, %Qubit** %85 + %qb__33 = load %Qubit*, %Qubit** %85 %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) %87 = icmp eq i64 %86, 1 br i1 %87, label %then0__4, label %else__4 then0__4: ; preds = %continue__3 - call void @__quantum__trc__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__inline__34) + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__33) br label %continue__4 else__4: ; preds = %continue__3 - call void @__quantum__trc__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__inline__34) + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__33) br label %continue__4 continue__4: ; preds = %else__4, %then0__4 @@ -968,16 +1097,16 @@ continue__4: ; preds = %else__4, %then0__4 call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %88 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %89 = bitcast i8* %88 to %Qubit** - %qb__inline__35 = load %Qubit*, %Qubit** %89 + %qb__34 = load %Qubit*, %Qubit** %89 %90 = icmp eq i64 %86, 1 br i1 %90, label %then0__5, label %else__5 then0__5: ; preds = %continue__4 - call void @__quantum__trc__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__inline__35) + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__34) br label %continue__5 else__5: ; preds = %continue__4 - call void @__quantum__trc__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__inline__35) + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__34) br label %continue__5 continue__5: ; preds = %else__5, %then0__5 @@ -985,16 +1114,16 @@ continue__5: ; preds = %else__5, %then0__5 call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %92 = bitcast i8* %91 to %Qubit** - %qb__inline__36 = load %Qubit*, %Qubit** %92 + %qb__35 = load %Qubit*, %Qubit** %92 %93 = icmp eq i64 %86, 1 br i1 %93, label %then0__6, label %else__6 then0__6: ; preds = %continue__5 - call void @__quantum__trc__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__inline__36) + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__35) br label %continue__6 else__6: ; preds = %continue__5 - call void @__quantum__trc__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__inline__36) + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__35) br label %continue__6 continue__6: ; preds = %else__6, %then0__6 @@ -1002,272 +1131,310 @@ continue__6: ; preds = %else__6, %then0__6 call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %95 = bitcast i8* %94 to %Qubit** - %qb__inline__37 = load %Qubit*, %Qubit** %95 - call void @__quantum__trc__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__inline__37) + %qb__36 = load %Qubit*, %Qubit** %95 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__36) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %97 = bitcast i8* %96 to %Qubit** - %qb__inline__38 = load %Qubit*, %Qubit** %97 - call void @__quantum__trc__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__inline__38) + %qb__37 = load %Qubit*, %Qubit** %97 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__37) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %99 = bitcast i8* %98 to %Qubit** - %qb__inline__39 = load %Qubit*, %Qubit** %99 - call void @__quantum__trc__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__inline__39) + %qb__38 = load %Qubit*, %Qubit** %99 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__38) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) 
%101 = bitcast i8* %100 to %Qubit** - %qb__inline__40 = load %Qubit*, %Qubit** %101 - call void @__quantum__trc__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__inline__40) + %qb__39 = load %Qubit*, %Qubit** %101 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__39) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) %103 = bitcast i8* %102 to %Qubit** - %qb__inline__41 = load %Qubit*, %Qubit** %103 + %qb__40 = load %Qubit*, %Qubit** %103 call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__inline__41) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__40) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) %105 = bitcast i8* %104 to %Qubit** - %qb__inline__43 = load %Qubit*, %Qubit** %105 + %qb__42 = load %Qubit*, %Qubit** %105 call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) - call void @__quantum__trc__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__inline__43) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__42) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__qubit_release_array(%Array* %cc) call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) - %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) - %107 = bitcast i8* %106 to %Qubit** - %qb__inline__45 = load %Qubit*, %Qubit** %107 - %r0 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__45) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__qis__inject_global_barrier(i64, i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(i1 %compare) { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %109 = bitcast i8* %108 to %Qubit** - %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) - %111 = bitcast i8* %110 to %Qubit** - %112 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) - %113 = bitcast i8* %112 to %Qubit** - %114 = load %Qubit*, %Qubit** %113 - %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) - %116 = bitcast i8* %115 to %Qubit** - %117 = load %Qubit*, %Qubit** %116 - store %Qubit* %114, %Qubit** %109 - store %Qubit* %117, %Qubit** %111 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10 + store %Qubit* %8, %Qubit** %3 + store %Qubit* %11, %Qubit** %5 call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %paulis__inline__47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) - %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %119 = bitcast i8* %118 to i2* - %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %121 = bitcast i8* %120 to i2* - %122 = load i2, i2* @PauliY - %123 = load i2, i2* @PauliX - store i2 %122, i2* %119 - store i2 %123, i2* %121 - call void @__quantum__rt__array_update_alias_count(%Array* %paulis__inline__47, i64 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %13 = bitcast i8* %12 to i2* + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %15 = bitcast i8* %14 to i2* + %16 = load i2, i2* @PauliY + %17 = load i2, i2* @PauliX + store i2 %16, i2* %13 + store i2 %17, i2* %15 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) - %124 = load %Result*, %Result** @ResultOne - %res__inline__47 = alloca %Result* - store %Result* %124, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %124, i64 1) - %haveY__inline__47 = alloca i1 - store i1 false, i1* %haveY__inline__47 + %18 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %18, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY br label %header__1 -header__1: ; preds = %exiting__1, %continue__6 - %i__inline__47 = phi i64 [ 0, %continue__6 ], [ %137, %exiting__1 ] - %125 = icmp sle i64 %i__inline__47, 1 - br i1 %125, label %body__1, label %exit__1 +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %31, %exiting__1 ] + %19 = icmp sle i64 %i, 1 + br i1 %19, label %body__1, label %exit__1 body__1: ; preds = %header__1 - %126 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) - %127 = bitcast i8* %126 to i2* - %128 = load i2, i2* %127 - %129 = load i2, i2* @PauliY - %130 = icmp eq i2 %128, %129 - %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 %i__inline__47) - %132 = bitcast i8* %131 to i2* - %133 = load i2, i2* %132 - %134 = load i2, i2* @PauliI - %135 = icmp eq i2 %133, %134 - %136 = or i1 %130, %135 - br i1 %136, label 
%then0__7, label %continue__7 - -then0__7: ; preds = %body__1 - store i1 true, i1* %haveY__inline__47 - br label %continue__7 - -continue__7: ; preds = %then0__7, %body__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %21 = bitcast i8* %20 to i2* + %22 = load i2, i2* %21 + %23 = load i2, i2* @PauliY + %24 = icmp eq i2 %22, %23 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %26 = bitcast i8* %25 to i2* + %27 = load i2, i2* %26 + %28 = load i2, i2* @PauliI + %29 = icmp eq i2 %27, %28 + %30 = or i1 %24, %29 + br i1 %30, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 br label %exiting__1 -exiting__1: ; preds = %continue__7 - %137 = add i64 %i__inline__47, 1 +exiting__1: ; preds = %continue__1 + %31 = add i64 %i, 1 br label %header__1 exit__1: ; preds = %header__1 - %138 = load i1, i1* %haveY__inline__47 - br i1 %138, label %then0__8, label %test1__1 + %32 = load i1, i1* %haveY + br i1 %32, label %then0__2, label %test1__1 -then0__8: ; preds = %exit__1 - %139 = call %Result* @__quantum__trc__multi_qubit_measure(i64 106, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %139, i64 1) - store %Result* %139, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %139, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %124, i64 -1) - br label %continue__8 +then0__2: ; preds = %exit__1 + %33 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + store %Result* %33, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 -1) + br label %continue__2 test1__1: ; preds = %exit__1 br i1 false, label %then1__1, label %test2__1 then1__1: ; preds = %test1__1 - %140 = call %Result* @__quantum__trc__multi_qubit_measure(i64 107, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %140, i64 1) - %141 = load %Result*, %Result** %res__inline__47 - store %Result* %140, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %140, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %141, i64 -1) - br label %continue__8 + %34 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 1) + %35 = load %Result*, %Result** %res + store %Result* %34, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %35, i64 -1) + br label %continue__2 test2__1: ; preds = %test1__1 br i1 false, label %then2__1, label %test3__1 then2__1: ; preds = %test2__1 - %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %143 = bitcast i8* %142 to i2* - %144 = load i2, i2* %143 - %145 = load i2, i2* @PauliX - %146 = icmp eq i2 %144, %145 - br i1 %146, label %then0__9, label %else__7 - -then0__9: ; preds = %then2__1 - %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %148 = bitcast i8* %147 to %Qubit** - %qb__inline__48 = load %Qubit*, %Qubit** %148 - %149 = call %Result* 
@__quantum__trc__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__inline__48) - call void @__quantum__rt__result_update_reference_count(%Result* %149, i64 1) - %150 = load %Result*, %Result** %res__inline__47 - store %Result* %149, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %149, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %150, i64 -1) - br label %continue__9 - -else__7: ; preds = %then2__1 - %151 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) - %152 = bitcast i8* %151 to %Qubit** - %qb__inline__49 = load %Qubit*, %Qubit** %152 - %153 = call %Result* @__quantum__trc__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__inline__49) - call void @__quantum__rt__result_update_reference_count(%Result* %153, i64 1) - %154 = load %Result*, %Result** %res__inline__47 - store %Result* %153, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %153, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %154, i64 -1) - br label %continue__9 - -continue__9: ; preds = %else__7, %then0__9 - br label %continue__8 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %37 = bitcast i8* %36 to i2* + %38 = load i2, i2* %37 + %39 = load i2, i2* @PauliX + %40 = icmp eq i2 %38, %39 + br i1 %40, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %42 = bitcast i8* %41 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %42 + %43 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__2) + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) + %44 = load %Result*, %Result** %res + store %Result* %43, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) + br label %continue__3 + +else__1: ; preds = %then2__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %46 = bitcast i8* %45 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %46 + %47 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__3) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 1) + %48 = load %Result*, %Result** %res + store %Result* %47, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %48, i64 -1) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label %continue__2 test3__1: ; preds = %test2__1 - %155 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %156 = bitcast i8* %155 to i2* - %157 = load i2, i2* %156 - %158 = load i2, i2* @PauliX - %159 = icmp eq i2 %157, %158 - %160 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %161 = bitcast i8* %160 to i2* - %162 = load i2, i2* %161 - %163 = load i2, i2* @PauliX - %164 = icmp eq i2 %162, %163 - %165 = and i1 %159, %164 - br i1 %165, label %then3__1, label %test4__1 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %50 = bitcast i8* %49 to i2* + %51 = load i2, i2* %50 + %52 = load i2, i2* @PauliX + %53 = icmp eq i2 %51, %52 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %55 = 
bitcast i8* %54 to i2* + %56 = load i2, i2* %55 + %57 = load i2, i2* @PauliX + %58 = icmp eq i2 %56, %57 + %59 = and i1 %53, %58 + br i1 %59, label %then3__1, label %test4__1 then3__1: ; preds = %test3__1 - %166 = call %Result* @__quantum__trc__multi_qubit_measure(i64 108, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %166, i64 1) - %167 = load %Result*, %Result** %res__inline__47 - store %Result* %166, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %166, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %167, i64 -1) - br label %continue__8 + %60 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 1) + %61 = load %Result*, %Result** %res + store %Result* %60, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %61, i64 -1) + br label %continue__2 test4__1: ; preds = %test3__1 - %168 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %169 = bitcast i8* %168 to i2* - %170 = load i2, i2* %169 - %171 = load i2, i2* @PauliX - %172 = icmp eq i2 %170, %171 - %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %174 = bitcast i8* %173 to i2* - %175 = load i2, i2* %174 - %176 = load i2, i2* @PauliZ - %177 = icmp eq i2 %175, %176 - %178 = and i1 %172, %177 - br i1 %178, label %then4__1, label %test5__1 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %63 = bitcast i8* %62 to i2* + %64 = load i2, i2* %63 + %65 = load i2, i2* @PauliX + %66 = icmp eq i2 %64, %65 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %68 = bitcast i8* %67 to i2* + %69 = load i2, i2* %68 + %70 = load i2, i2* @PauliZ + %71 = icmp eq i2 %69, %70 + %72 = and i1 %66, %71 + br i1 %72, label %then4__1, label %test5__1 then4__1: ; preds = %test4__1 - %179 = call %Result* @__quantum__trc__multi_qubit_measure(i64 109, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %179, i64 1) - %180 = load %Result*, %Result** %res__inline__47 - store %Result* %179, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %179, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %180, i64 -1) - br label %continue__8 + %73 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 1) + %74 = load %Result*, %Result** %res + store %Result* %73, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + br label %continue__2 test5__1: ; preds = %test4__1 - %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %182 = bitcast i8* %181 to i2* - %183 = load i2, i2* %182 - %184 = load i2, i2* @PauliZ - %185 = icmp eq i2 %183, %184 - %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %187 = bitcast i8* %186 to i2* - %188 = load i2, i2* %187 - %189 = load i2, i2* @PauliX - %190 = icmp eq i2 %188, %189 - %191 = and i1 %185, %190 - br i1 %191, label %then5__1, label %test6__1 + %75 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = load i2, i2* %76 + %78 = load i2, i2* @PauliZ + %79 = icmp eq i2 %77, %78 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %81 = bitcast i8* %80 to i2* + %82 = load i2, i2* %81 + %83 = load i2, i2* @PauliX + %84 = icmp eq i2 %82, %83 + %85 = and i1 %79, %84 + br i1 %85, label %then5__1, label %test6__1 then5__1: ; preds = %test5__1 - %192 = call %Result* @__quantum__trc__multi_qubit_measure(i64 110, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %192, i64 1) - %193 = load %Result*, %Result** %res__inline__47 - store %Result* %192, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %192, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %193, i64 -1) - br label %continue__8 + %86 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 1) + %87 = load %Result*, %Result** %res + store %Result* %86, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %87, i64 -1) + br label %continue__2 test6__1: ; preds = %test5__1 - %194 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 0) - %195 = bitcast i8* %194 to i2* - %196 = load i2, i2* %195 - %197 = load i2, i2* @PauliZ - %198 = icmp eq i2 %196, %197 - %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis__inline__47, i64 1) - %200 = bitcast i8* %199 to i2* - %201 = load i2, i2* %200 - %202 = load i2, i2* @PauliZ - %203 = icmp eq i2 %201, %202 - %204 = and i1 %198, %203 - br i1 %204, label %then6__1, label %continue__8 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = load i2, i2* %89 + %91 = load i2, i2* @PauliZ + %92 = icmp eq i2 %90, %91 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %94 = bitcast i8* %93 to i2* + %95 = load i2, i2* %94 + %96 = load i2, i2* @PauliZ + %97 = icmp eq i2 %95, %96 + %98 = and i1 %92, %97 + br i1 %98, label %then6__1, label %continue__2 then6__1: ; preds = %test6__1 - %205 = call %Result* @__quantum__trc__multi_qubit_measure(i64 111, i64 1, %Array* %qs12) - call void @__quantum__rt__result_update_reference_count(%Result* %205, i64 1) - %206 = load %Result*, %Result** %res__inline__47 - store %Result* %205, %Result** %res__inline__47 - call void @__quantum__rt__result_update_reference_count(%Result* %205, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %206, i64 -1) - br label %continue__8 - -continue__8: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__9, %then1__1, %then0__8 - %r12 = load %Result*, %Result** %res__inline__47 - call void @__quantum__rt__array_update_alias_count(%Array* %paulis__inline__47, i64 -1) + %99 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 1) + %100 = load %Result*, %Result** %res + store %Result* %99, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %100, i64 -1) + br label %continue__2 + +continue__2: ; preds = 
%then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %r12 = load %Result*, %Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %paulis__inline__47, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) + br i1 %compare, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__2 + %101 = load %Result*, %Result** @ResultZero + %102 = call i1 @__quantum__rt__result_equal(%Result* %r0, %Result* %101) + br i1 %102, label %then0__5, label %continue__5 + +then0__5: ; preds = %then0__4 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %104 = bitcast i8* %103 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %104 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__4) + br label %continue__5 + +continue__5: ; preds = %then0__5, %then0__4 + br label %continue__4 + +continue__4: ; preds = %continue__5, %continue__2 call void @__quantum__rt__qubit_release_array(%Array* %qs) call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) @@ -1275,17 +1442,21 @@ continue__8: ; preds = %then6__1, %test6__1 call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) - ret i1 true + ret void } -declare %Qubit* @__quantum__rt__qubit_allocate() +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) -declare %Array* @__quantum__rt__qubit_allocate_array(i64) - -declare void @__quantum__trc__inject_global_barrier(i64, i64) - -declare void @__quantum__rt__qubit_release(%Qubit*) +define { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr { %String* }, { %String* }* %1, i64 0, i32 0 + store %String* %__Item1__, %String** %2 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i64 1) + ret { %String* }* %1 +} -declare void @__quantum__rt__qubit_release_array(%Array*) +declare %Tuple* @__quantum__rt__tuple_create(i64) -attributes #0 = { "EntryPoint" } +declare void @__quantum__rt__string_update_reference_count(%String*, i64) diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index c9854547b14..96a92b3b6f7 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -30,7 +30,7 @@ namespace Microsoft.Quantum.Instructions { body intrinsic; } - @TargetInstruction("multi_qubit_measure") + @TargetInstruction("joint_measure") operation multi_qubit_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { body intrinsic; } @@ -250,7 +250,7 @@ namespace Microsoft.Quantum.Intrinsic { mutable res = One; mutable haveY = false; // Measurements that involve PauliY or PauliI - for (i in 0..Length(paulis)-1) + for i in 0..Length(paulis)-1 { if (paulis[i] == PauliY or paulis[i] == PauliI) { @@ -287,11 +287,11 @@ namespace Microsoft.Quantum.Intrinsic { body intrinsic; } - operation SWAP(a : Qubit, b 
: Qubit) : Unit
-    is Adj {
-        body intrinsic;
-        adjoint self;
-    }
+    // operation SWAP(a : Qubit, b : Qubit) : Unit
+    // is Adj {
+    //     body intrinsic;
+    //     adjoint self;
+    // }
 }

From 798ba24b18b79ce1149c63a163e21aa774512970 Mon Sep 17 00:00:00 2001
From: Irina Yatsenko
Date: Thu, 11 Feb 2021 10:20:45 -0800
Subject: [PATCH 19/27] Improving the readme per PR feedback

---
 src/QirRuntime/lib/Tracer/README.md | 136 ++++++++++++++--------------
 1 file changed, 67 insertions(+), 69 deletions(-)

diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md
index d360299243b..ac0e6f79281 100644
--- a/src/QirRuntime/lib/Tracer/README.md
+++ b/src/QirRuntime/lib/Tracer/README.md
@@ -2,72 +2,36 @@
 The purpose of the Resource Tracer is to provide an efficient and flexible way to estimate the resources of a quantum program
  in QIR representation. The estimates are calculated by simulating execution of the program (as opposed to static
- analysis).
+ analysis). Please see [Resource Estimator](https://docs.microsoft.com/en-us/azure/quantum/user-guide/machines/resources-estimator)
+ for more background on resource estimation for quantum programs.

-In addition to the standard QIR runtime functions, the quantum program will have to:
+To run against the tracer, the quantum program should comply with the
+ [QIR specifications](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR) as well as:

1. convert _each_ used intrinsic operation into one of the _qis_ operations supported by the tracer (see the list below);
1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates
   would cover only one branch of the execution);
1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated as
   inlined as if the whole program consisted of a single operation);
1. (_optional_) provide callbacks for global barriers;
1. (_optional_) provide a description of the mapping for frame tracking;
-1. (_optional_) provide names of operations for user friendly output (in the form of `tracer-config.hpp|cpp` files that,
-   ideally, can be generated by the Q# compiler from the mappings described in target.qs).
+1. (_optional_) provide names of operations for output (in the form of `tracer-config.hpp|cpp` files). The last provision
+   can, ideally, be generated by the Q# compiler from the mappings described in target.qs.

The Resource Tracer will consist of:

-1. the bridge for the `__quantum__qis__*` extension methods;
-2. the native implementation to back the extensions;
+1. the bridge for the `__quantum__qis__*` methods listed below;
+2. the native implementation to back the `__quantum__qis__*` methods;
3. the logic for partitioning gates into layers;
4. the logic for frame tracking;
5. output of the collected statistics;
6. (_lower priority_) the scheduling component to optimize depth and/or width of the circuit.

-## List of `__quantum__qis__*` methods ##
-
-___WIP___
-
-| Signature | Description |
-| :---------------------------------------------------- | :----------------------------------------------------------- |
-| `void __quantum__qis__inject_global_barrier(i32 %id, i32 %duration)` | Function to insert a global barrier. It will be inserted into QIR based on a user defined intrinsic. See [Layering](#layering) section for details. |
-| `void __quantum__qis__on_operation_start(i64 %id)` | Function to identify the start of a quantum module.
The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) and option to cache estimates for a module, executed multiple times. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. | -| `void __quantum__qis__on_operation_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. The call to the function will be inserted into QIR by the Q# compiler when Tracer is specified as the compilation target. | -| `void __quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation, as assigned by the client. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is duration to be assigned to the particular invocation of the operation. | -| `void __quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits.| -| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit. | -| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. | -| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user might assign different operation ids for different measurement bases. | -| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user might assign different operation ids for different measurement bases. | -| `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. | - -_Note on operation ids_: The client is responsible for using operation ids in a consistent manner. Operations with the - same id will be counted by the tracer as the _same_ operation, even accross invocations with different number of target - qubits or when different functors are applied. - -## Native backing of the extension methods ## - -The Resource Tracer will reuse qir-rt library as much as possible while extending it with the callbacks specified above. - -__Conditionals on measurements__: The Resource Tracer will execute LLVM IR's branching structures "as is", depending on - the values of the corresponding variables at runtime. To enable estimation of branches that depend on a measurement - result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the - conditionals into `__quantum__qis__apply_if*` calls. The tracer will add operations from _both branches_ into the - layers it creates to compute the upper bound estimate. - -Nested conditionals, conditional measurements and conditional tracked operations will _not_ be supported. - -__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated - computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules - are treated as layering barriers. 
-

 ## Layering ##

 _Definition_: ___Time___ is an integer-valued function on all quantum operations in a program (gates, measurements,
- qubits allocation/release). For each gate there is start and end times. For each qubit, there are times when the qubit
+ qubits allocation/release). For each gate there are start and end times. For each qubit, there are times when the qubit
  is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits the gate uses.
  If two gates or measurements use the same qubit, one of the gates must have start time greater than or equal to the end
  time of the other.

@@ -88,21 +52,19 @@ A sequential program can be trivially layered such that each layer contains exac
 zero time to execute, those gates can be added to a layer even if they act on the same qubit another gate in this layer
 is acting on and have to be executed sequentially within the layer.

-_Definition_: A ___global barrier___ is any operation that acts on _all_ currently allocated qubits. We'll provide means
- for the clients to inject global barriers equivalent to an identity operator. This will allow the clients to enforce a
- particular layering structure (because no later operation can sink below the barrier).
-
-The global barriers will be implemented as calls to `__quantum__qis__global_barrier` function. No additional support from
- the compiler should be needed, as the user can define their own intrinsic to represent the barrier and map it to the above
- runtime function via targets.qs file. The user can choose duration of a barrier which would affect start time of the
- following layers but no operations will be added to a barrier, independent of its width.
-
 ### The Resource Tracer's Layering Algorithm ###

 As the tracer is executing a sequential quantum program, it will compute a time function and corresponding layering using
  the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might be
  done differently, as long as the resulting layering is the same as if running the conceptual algorithm.

+_Definition_: A ___barrier___ is an operation that acts on _all_ currently allocated qubits; no operation encountered
+ after a barrier can be added to a layer that precedes the barrier.
+
+The global barriers will be implemented as calls to `__quantum__qis__global_barrier` function. No additional support from
+ the compiler should be needed, as the user can define their own intrinsic to represent the barrier and map it to the
+ above runtime function via targets.qs file. The user can choose duration of a barrier which would affect start time of
+ the following layers but no operations will be added to a barrier, independent of its width.
+
1. The tracer will have a setting for preferred layer duration: P.
1. The first encountered operation of __non-zero__ duration N is added into layer L(0, max(P,N)). The value for
 _conditional barrier_ is set to 0.
1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is a single-qubit _op_ of
 duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations).
 Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the _first_ layer that contains an
- operation that acts on at least one of the qubits _op_ is acting on. Add _op_ into this layer. If no such layer found,
- remember that the qubits have pending 0-duration _op_ and commit it to the layer that first uses any of the qubits in
- an operation of non-zero duration. At the end of the program commit all pending operations of duration zero into a new
+ operation that acts on the qubit of _op_. Add _op_ into this layer. If no such layer is found, add _op_ to the list of
+ pending operations on the qubit. At the end of the program commit all pending operations of duration zero into a new
 layer.
-1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is _op_ of duration _N > 0_.
- Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the _last_ layer L(t, Nt) such that
- Qubits(t, Nt) don't contain any of the _op_'s qubits and find the _first_ layer L(w, Nw) such that Qubits(w, Nw) contains
- some of _op_'s qubits but Nw + N <= P. Add _op_ into one of the two layer with later time. If neither such layers found,
- add _op_ into a new layer L(k+1, max(P, N)).
+1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is _op_ of duration _N > 0_
+ or it involves more than one qubit. Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the
+ _last_ layer L(t, Nt) such that Qubits(t, Nt) don't contain any of the _op_'s qubits and find the _first_ layer L(w, Nw)
+ such that Qubits(w, Nw) contains some of _op_'s qubits but Nw + N <= P. Add _op_ into whichever of the two layers has
+ the later time. If neither such layer is found, add _op_ into a new layer L(k+1, max(P, N)). Add the pending operations
+ of all involved qubits into the same layer and clear the pending lists.

## Special handling of SWAP ##

The tracer will provide a way to handle SWAP as, effectively, renaming of the involved qubits. The user will have the
 choice of using the special handling versus treating the gate as a standard counted intrinsic.

## Frame tracking ##

A user might want to count operations differently when they are applied in a different state. For example, if a Hadamard
 gate is applied to a qubit and then Rz and Mx gates, a user might want to count the sequence as if Rx and Mz were
 executed. The frame is closed when the state of the qubit is reset (in Hadamard's case, another Hadamard operator is
 applied to the qubit). The user will be able to register the required frame tracking with the tracer via a C++
 registration callback.

The descriptor of the frame will contain the following information and will be provided to the Tracer when initializing
 it in C++.

- openingOp: the operation id that opens the frame on the qubits this operation is applied to
-- closingOp: the operation is that closes the the frame on the qubits this operation is applied to
+- closingOp: the operation id that closes the frame on the qubits this operation is applied to
- vector of: { bitmask_ctls, bitmask_targets, operationIdOriginal, operationIdMapped }

The closing operation will be ignored if the frame on the qubit hasn't been opened.
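To make the conceptual rules above concrete, here is a rough, self-contained C++ sketch of the "tetris" placement. All
 names here (`Layering`, `Layer`, `AddOperation`) are illustrative only and do not mirror the actual `CTracer`
 implementation; global barriers, the conditional barrier and start-time bookkeeping are omitted for brevity.

```cpp
#include <algorithm>
#include <cstdint>
#include <set>
#include <unordered_map>
#include <vector>

using OpId = int64_t;
using QubitId = int64_t;
using Duration = int64_t;

struct Layer
{
    Duration duration = 0;          // at least the preferred duration P once created
    std::set<QubitId> qubits;       // qubits touched by operations in this layer
    std::vector<OpId> operations;
};

struct Layering
{
    Duration preferred = 1;         // the preferred layer duration "P"
    std::vector<Layer> layers;
    // 0-duration ops waiting for the first timed operation on their qubit.
    std::unordered_map<QubitId, std::vector<OpId>> pendingZeroOps;

    bool Overlaps(const Layer& layer, const std::vector<QubitId>& qs) const
    {
        for (QubitId q : qs)
        {
            if (layer.qubits.count(q) != 0) { return true; }
        }
        return false;
    }

    void AddOperation(OpId id, Duration d, const std::vector<QubitId>& qs)
    {
        // Zero-duration single-qubit ops join the latest layer that already
        // uses their qubit, or wait on the qubit's pending list.
        if (d == 0 && qs.size() == 1)
        {
            for (auto it = layers.rbegin(); it != layers.rend(); ++it)
            {
                if (Overlaps(*it, qs))
                {
                    it->operations.push_back(id);
                    return;
                }
            }
            pendingZeroOps[qs[0]].push_back(id);
            return;
        }

        // "Tetris": let the op sink through layers that don't touch its
        // qubits; it must stop at the first layer (from the end) that does.
        size_t target = layers.size(); // sentinel: open a new layer
        for (size_t i = layers.size(); i-- > 0;)
        {
            if (Overlaps(layers[i], qs))
            {
                // The blocking layer may still absorb the op if it has room.
                if (target == layers.size() && layers[i].duration + d <= preferred)
                {
                    target = i;
                    layers[i].duration += d;
                }
                break;
            }
            target = i; // keep sinking into earlier, non-conflicting layers
        }
        if (target == layers.size())
        {
            layers.push_back(Layer{});
            layers.back().duration = std::max(preferred, d);
        }

        Layer& layer = layers[target];
        layer.operations.push_back(id);
        for (QubitId q : qs)
        {
            // Commit any pending zero-duration ops of the involved qubits.
            layer.qubits.insert(q);
            auto& pending = pendingZeroOps[q];
            layer.operations.insert(layer.operations.end(), pending.begin(), pending.end());
            pending.clear();
        }
    }
};
```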
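As an illustration, the descriptor above could look roughly like the following in C++. The types and the registration
 call are hypothetical sketches, not the tracer's actual API; the operation ids for Hadamard, Rz, Rx, Mx and Mz are
 assumed to have been chosen by the user, matching the Hadamard example above.

```cpp
#include <cstdint>
#include <vector>

using OpId = int64_t;

// One remapping rule: while the frame is open, count operationIdOriginal as
// operationIdMapped; the bitmasks select which control/target qubits of the
// operation must be inside the frame for the rule to apply.
struct FrameMapping
{
    uint64_t bitmaskCtls;
    uint64_t bitmaskTargets;
    OpId operationIdOriginal;
    OpId operationIdMapped;
};

struct FrameDescriptor
{
    OpId openingOp; // opens the frame on the qubits the operation is applied to
    OpId closingOp; // closes the frame on those qubits
    std::vector<FrameMapping> mappings;
};

// Hypothetical registration during tracer initialization: a Hadamard frame
// that counts Rz as Rx and Mx as Mz while the frame is open.
//
// tracer.RegisterFrame(FrameDescriptor{hOpId, hOpId,
//     {{/*ctls*/ 0, /*targets*/ 1, rzOpId, rxOpId},
//      {/*ctls*/ 0, /*targets*/ 1, mxOpId, mzOpId}}});
```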
The bitmasks define which of the qubits
@@ -161,8 +123,7 @@ The tracer will have options to output the estimates into command line or into a
 - the remaining columns contain counts per operation in the layer
 - The first row is a header row: `layer_id\tname(\t[a-zA-Z]+)*`, where specific operation names are listed, such as
-   CNOT, Mz, etc., if provided by the user alongside with the target.qs file (if not provided, the header row will list
-   operation ids).
+   CNOT, Mz, etc., if provided by the user (if not provided, the header row will list operation ids).
 - All following rows contain statistics per layer: `[0-9]+\t[a-zA-Z]*(\t([0-9]*))*`.
 - The rows are sorted in order of increasing layer time.
 - Zero counts for any of the statistics _might_ be replaced with an empty string.
@@ -175,3 +136,40 @@ The map of operation ids to names can be passed to the tracer's constructor as `
 TBD but lower priority.
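For illustration only, a hypothetical program that uses operations named X, CNOT and Mz, plus one user-injected barrier
 named b1, might produce output in the format described above like this (all counts are invented; empty cells stand for
 zero counts):

```text
layer_id	name	X	CNOT	Mz
0		3	1	
4		1		2
8	b1			
9		2		1
```

In the implementation this corresponds to a call along the lines of `tracer.PrintLayerMetrics(std::cout, "\t", false)`,
 where the arguments are the output stream, the column separator and whether zero metrics should be printed.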
## List of `__quantum__qis__*` methods, supported by the Tracer ##

| Signature | Description |
| :---------------------------------------------------- | :----------------------------------------------------------- |
| `void __quantum__qis__inject_global_barrier(i32 %id, i32 %duration)` | Function to insert a global barrier between layers. The first argument is the id of the barrier and the second argument specifies the duration of the barrier. See [Layering](#layering) section for details. |
| `void __quantum__qis__on_module_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) an option to cache estimates for a module executed multiple times. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately before the body code of a Q# `operation`. |
| `void __quantum__qis__on_module_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately after the body code of a Q# `operation`. |
| `void __quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is the duration to be assigned to the particular invocation of the operation. |
| `void __quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits. |
| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with a single target qubit. |
| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. |
| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user might assign different operation ids for different measurement bases. |
| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint measurements of qubits. The user might assign different operation ids for different measurement bases. |
| `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. |
| TODO: handling of conditionals on measurement results | |

_Note on operation ids_: The user is responsible for using operation ids in a consistent manner. Operations with the
 same id will be counted by the tracer as the _same_ operation, even across invocations with different numbers of target
 qubits or when different functors are applied.

_Note on mapping Q# intrinsics to the methods above_: The Q# compiler will support the Tracer as a special target and
 will let the user either choose some default mapping or specify their own custom mapping. For example, see the
 QIR-tracer tests in this project (`tracer-target.qs` specifies the mapping).

The Resource Tracer will reuse the qir-rt library while implementing the qis methods specified above.

__Conditionals on measurements__: The Resource Tracer will execute LLVM IR's branching structures "as is", depending on
 the values of the corresponding variables at runtime. To enable estimation of branches that depend on a measurement
 result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the
 conditionals into `__quantum__qis__apply_if*` calls. The tracer will add operations from _both branches_ into the
 layers it creates to compute the upper bound estimate.

Nested conditionals, conditional measurements and conditional tracked operations will _not_ be supported.

__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated
 computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules
 are treated as layering barriers.
\ No newline at end of file

From d63ffa8f6498471f23c607e1f425174727508eec Mon Sep 17 00:00:00 2001
From: Irina Yatsenko
Date: Fri, 12 Feb 2021 17:33:28 -0800
Subject: [PATCH 20/27] More PR feedback

---
 src/QirRuntime/lib/Tracer/tracer-bridge.ll    |  2 +-
 src/QirRuntime/lib/Tracer/tracer-qis.cpp      |  4 +--
 src/QirRuntime/lib/Tracer/tracer.cpp          | 28 +++++++++++--------
 src/QirRuntime/lib/Tracer/tracer.hpp          |  2 +-
 .../test/QIR-tracer/qir-tracer-driver.cpp     |  4 +--
 .../test/QIR-tracer/tracer-config.cpp         |  2 +-
 .../test/QIR-tracer/tracer-config.hpp         |  2 +-
 src/QirRuntime/test/QIR-tracer/tracer-core.qs |  2 +-
 .../test/QIR-tracer/tracer-intrinsics.qs      |  2 +-
 .../test/QIR-tracer/tracer-measurements.qs    |  2 +-
 .../test/QIR-tracer/tracer-target.qs          |  2 +-
 src/QirRuntime/test/unittests/TracerTests.cpp |  2 +-
 12 files changed, 29 insertions(+), 25 deletions(-)

diff --git a/src/QirRuntime/lib/Tracer/tracer-bridge.ll b/src/QirRuntime/lib/Tracer/tracer-bridge.ll
index 6754646d82d..b851de0bf43 100644
--- a/src/QirRuntime/lib/Tracer/tracer-bridge.ll
+++ b/src/QirRuntime/lib/Tracer/tracer-bridge.ll
@@ -1,4 +1,4 @@
-; Copyright (c) Microsoft Corporation. All rights reserved.
+; Copyright (c) Microsoft Corporation.
 ; Licensed under the MIT License.

 ;=======================================================================================================================

diff --git a/src/QirRuntime/lib/Tracer/tracer-qis.cpp b/src/QirRuntime/lib/Tracer/tracer-qis.cpp
index 47379deaa9a..737807f1036 100644
--- a/src/QirRuntime/lib/Tracer/tracer-qis.cpp
+++ b/src/QirRuntime/lib/Tracer/tracer-qis.cpp
@@ -1,7 +1,7 @@
-// Copyright (c) // NOLINT{} Microsoft Corporation. All rights reserved.
+// Copyright (c) Microsoft Corporation.
 // Licensed under the MIT License.
-#include +#include #include "CoreTypes.hpp" #include "qirTypes.hpp"
diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 9ee0e340391..99ea765a1d0 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include @@ -34,10 +34,13 @@ namespace Quantum qubits.emplace_back(QubitState{}); return reinterpret_cast(qubit); } + void CTracer::ReleaseQubit(Qubit /*qubit*/) { // nothing for now } + + // TODO: what would be meaningful information we could print out for a qubit? std::string CTracer::QubitToString(Qubit q) { size_t qubitIndex = reinterpret_cast(q); const QubitState& qstate = this->UseQubit(q); stringstream str(std::to_string(qubitIndex)); str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroOps.size() << ")"; return str.str(); } + void CTracer::ReleaseResult(Result /*result*/) { - // nothing to do, we don't allocate results on measurement + // nothing to do, we don't allocate results on measurement [yet] } + // Although the tracer should never compare results or get their values, it still has to implement UseZero and // UseOne methods as they are invoked by the QIR initialization. Result CTracer::UseZero() { return reinterpret_cast(INVALID); } + Result CTracer::UseOne() { return reinterpret_cast(INVALID); } @@ -195,17 +201,15 @@ assert(nFirstGroup >= 0); assert(nSecondGroup > 0); - this->seenOps.insert(id); - - // Operations that involve a single qubit can special case duration zero. - if (nFirstGroup == 0 && nSecondGroup == 1) + // Special-casing operations of duration zero enables potentially better reuse of qubits, when we'll start + // optimizing for circuit width. However, tracking _the same_ pending operation across _multiple_ qubits is + // tricky and not worth the effort, so we only do the single-qubit case. + if (opDuration == 0 && nFirstGroup == 0 && nSecondGroup == 1) { return this->TraceSingleQubitOp(id, opDuration, secondGroup[0]); } - // Special-casing operations of duration zero enables potentially better reuse of qubits, when we'll start - // optimizing for circuit width. However, tracking _the same_ pending operation across _multiple_ qubits is - // tricky and not worth the effort, so we don't do it. + this->seenOps.insert(id); // Figure out the layer this operation should go into. LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(secondGroup[0], opDuration); @@ -275,11 +279,11 @@ void CTracer::PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const { // Sort the operations by id so the output is deterministic.
- std::set seenOpsOrederedById(this->seenOps.begin(), this->seenOps.end()); + std::set seenOpsOrderedById(this->seenOps.begin(), this->seenOps.end()); // header row out << "layer_id" << separator << "name"; - for (OpId opId : seenOpsOrederedById) + for (OpId opId : seenOpsOrderedById) { out << separator << GetOperationName(opId, this->opNames); } @@ -292,7 +296,7 @@ namespace Quantum out << layer.startTime; out << separator << GetOperationName(layer.barrierId, this->opNames); - for (OpId opId : seenOpsOrederedById) + for (OpId opId : seenOpsOrderedById) { auto foundInLayer = layer.operations.find(opId); out << separator diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp index 25cea950869..fcdb6453d49 100644 --- a/src/QirRuntime/lib/Tracer/tracer.hpp +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index 402ec01aa5b..7cb90a364ba 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -1,7 +1,7 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -#include +#include #include #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp index b337fa903ec..c4320f8ae17 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. // TODO: ideally, this file should be generated by the Q# compiler alongside the qir, using the mappings specified in diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp index 0587d876416..7c1b654c43e 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. // TODO: ideally, this file should be generated by the Q# compiler alongside the qir diff --git a/src/QirRuntime/test/QIR-tracer/tracer-core.qs b/src/QirRuntime/test/QIR-tracer/tracer-core.qs index de169cab594..57048e98950 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-core.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-core.qs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. namespace Microsoft.Quantum.Core{ diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs index d4a92b4af57..97a2fee3131 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
namespace Microsoft.Quantum.Testing.Tracer diff --git a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs index a46ce7907ab..fab708e4cd4 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. namespace Microsoft.Quantum.Testing.Tracer diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index 96a92b3b6f7..da389210063 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. namespace Microsoft.Quantum.Instructions { diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp index eb0eeb77fa0..37f8bc57ad1 100644 --- a/src/QirRuntime/test/unittests/TracerTests.cpp +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -1,4 +1,4 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. +// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. #include From eb823d2425218018e291dcb943270b67051ed1b5 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Tue, 16 Feb 2021 16:25:33 -0800 Subject: [PATCH 21/27] PR feedback (not complete yet) --- src/QirRuntime/lib/Tracer/README.md | 26 ++-- .../test/QIR-tracer/qir-tracer-driver.cpp | 15 +-- src/QirRuntime/test/QIR-tracer/tracer-core.qs | 2 +- .../test/QIR-tracer/tracer-intrinsics.qs | 105 ++++++++------- .../test/QIR-tracer/tracer-measurements.qs | 34 ++--- .../test/QIR-tracer/tracer-target.qs | 121 ++++++------------ 6 files changed, 121 insertions(+), 182 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index ac0e6f79281..753ec5db260 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -58,28 +58,30 @@ As the tracer is executing a sequential quantum program, it will compute a time using the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might be done differently, as long as the resulting layering is the same as if running the conceptual algorithm. -_Definition_: A ___barrier___ is ?? +_Definition_: A ___barrier___ is a layer that no more operations can be added into. -The global barriers will be implemented as calls to `__quantum__qis__global_barrier` function. No additional support from - the compiler should be needed, as the user can define their own intrinsic to represent the barrier and map it to the - above runtime function via targets.qs file. The user can choose duration of a barrier which would affect start time of - the following layers but no operations will be added to a barrier, independent of its width. +A user will be able to inject global barriers by calling `__quantum__qis__global_barrier` function. The user can choose + duration of a barrier which would affect start time of the following layers but no operations will be added to a barrier, + independent of its width. -1. The tracer will have a setting for preferred layer duration: P. +__The conceptual algorithm__: + +1. The tracer must be set the preferred layer duration: P. 1. 
The first encountered operation of __non-zero__ duration N is added into layer L(0, max(P,N)). The value - for _conditional barrier_ is set to 0. + of _conditional barrier_ variable on the tracer is set to 0. 1. When conditional callback is encountered, the layer L(t,N) of the measurement that produced the result the conditional - is dependent on, is looked up and the _conditional barrier_ is set to _t + N_. At the end of the conditional scope the - barrier is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen before or in - the same layer as the measurement, even if they don't involve the measured qubits.) + is dependent on, is looked up and the _conditional barrier_ is set to _t + N_. At the end of the conditional scope + _conditional barrier_ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen + before or in the same layer as the measurement, even if they don't involve the measured qubits.) + TODO: is it OK for later operations to be added to the layers with ops _inside_ conditional branches? 1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is a single-qubit _op_ of duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations). - Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the _first_ layer that contains an + Starting at L(k, Nk) and scanning backwards to L(_conditional barrier_, Nb) find the _first_ layer that contains an operation that acts on the qubit of _op_. Add _op_ into this layer. If no such layer is found, add _op_ to the list of pending operations on the qubit. At the end of the program commit all pending operations of duration zero into a new layer. 1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is _op_ of duration _N > 0_ - or it involves more than one qubit. Starting at L(k, Nk) and scanning backwards to L(conditional barrier, Nb) find the + or it involves more than one qubit. Starting at L(k, Nk) and scanning backwards to L(_conditional barrier_, Nb) find the _last_ layer L(t, Nt) such that Qubits(t, Nt) don't contain any of the _op_'s qubits and find the _first_ layer L(w, Nw) such that Qubits(w, Nw) contains some of _op_'s qubits but Nw + N <= P. Add _op_ into one of the two layer with later time. If neither such layers is found, add _op_ into a new layer L(k+1, max(P, N)). Add the pending operations of all diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index 7cb90a364ba..33dda3ccaea 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -2,7 +2,7 @@ // Licensed under the MIT License. 
#include -#include +#include #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file #include "catch.hpp" @@ -17,22 +17,21 @@ using namespace Microsoft::Quantum; namespace TracerUser { -TEST_CASE("Invoke each operator from Q# core once", "[qir-tracer]") +TEST_CASE("Invoke each intrinsic from Q# core once", "[qir-tracer]") { shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body()); - vector layers = tr->UseLayers(); + const vector& layers = tr->UseLayers(); + + std::stringstream out; + tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); + INFO(out.str()); // TestCoreIntrinsics happens to produce 24 layers right now and we are not checking whether that's expected -- as // testing of layering logic is better done by unit tests. CHECK(layers.size() == 24); - - std::ofstream out; - out.open("qir-tracer-test.txt"); - tr->PrintLayerMetrics(out, "\t", false /*printZeroMetrics*/); - out.close(); } TEST_CASE("Measurements can be counted but cannot be compared", "[qir-tracer]") diff --git a/src/QirRuntime/test/QIR-tracer/tracer-core.qs b/src/QirRuntime/test/QIR-tracer/tracer-core.qs index 57048e98950..84e57ae32e3 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-core.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-core.qs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. -namespace Microsoft.Quantum.Core{ +namespace Microsoft.Quantum.Core { @Attribute() newtype Attribute = Unit; diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs index 97a2fee3131..ec8c48888ff 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -1,67 +1,62 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-namespace Microsoft.Quantum.Testing.Tracer -{ +namespace Microsoft.Quantum.Testing.Tracer { open Microsoft.Quantum.Intrinsic; - operation TestCoreIntrinsics() : Unit - { - use qs = Qubit[3] - { - X(qs[0]); - Y(qs[0]); - Z(qs[1]); - H(qs[1]); - CNOT(qs[1], qs[2]); - Rx(0.3, qs[0]); - Ry(0.4, qs[1]); - Rz(0.5, qs[2]); - //SWAP(qs[0], qs[2]); - S(qs[1]); - T(qs[2]); + operation TestCoreIntrinsics() : Unit { + use qs = Qubit[3]; - Barrier(42, 1); + X(qs[0]); + Y(qs[0]); + Z(qs[1]); + H(qs[1]); + CNOT(qs[1], qs[2]); + Rx(0.3, qs[0]); + Ry(0.4, qs[1]); + Rz(0.5, qs[2]); + //SWAP(qs[0], qs[2]); + S(qs[1]); + T(qs[2]); - Adjoint X(qs[0]); - Adjoint Y(qs[0]); - Adjoint Z(qs[1]); - Adjoint H(qs[1]); - Adjoint CNOT(qs[1], qs[2]); - Adjoint Rx(0.3, qs[0]); - Adjoint Ry(0.4, qs[1]); - Adjoint Rz(0.5, qs[2]); - //Adjoint SWAP(qs[0], qs[2]); - Adjoint S(qs[1]); - Adjoint T(qs[2]); + Barrier(42, 1); - use c = Qubit() - { - Controlled X([c], (qs[0])); - Controlled Y([c], (qs[0])); - Controlled Z([c], (qs[1])); - Controlled H([c], (qs[1])); - Controlled Rx([c], (0.3, qs[0])); - Controlled Ry([c], (0.4, qs[1])); - Controlled Rz([c], (0.5, qs[2])); - //Controlled SWAP([c], (qs[0], qs[2])); - Controlled S([c], (qs[1])); - Controlled T([c], (qs[2])); - } + Adjoint X(qs[0]); + Adjoint Y(qs[0]); + Adjoint Z(qs[1]); + Adjoint H(qs[1]); + Adjoint CNOT(qs[1], qs[2]); + Adjoint Rx(0.3, qs[0]); + Adjoint Ry(0.4, qs[1]); + Adjoint Rz(0.5, qs[2]); + //Adjoint SWAP(qs[0], qs[2]); + Adjoint S(qs[1]); + Adjoint T(qs[2]); - use cc = Qubit[2] - { - Controlled X(cc, (qs[0])); - Controlled Y(cc, (qs[0])); - Controlled Z(cc, (qs[1])); - Controlled H(cc, (qs[1])); - Controlled Rx(cc, (0.3, qs[0])); - Controlled Ry(cc, (0.4, qs[1])); - Controlled Rz(cc, (0.5, qs[2])); - //Controlled SWAP(cc, (qs[0], qs[2])); - Controlled S(cc, (qs[1])); - Controlled T(cc, (qs[2])); - } + use c = Qubit() { + Controlled X([c], (qs[0])); + Controlled Y([c], (qs[0])); + Controlled Z([c], (qs[1])); + Controlled H([c], (qs[1])); + Controlled Rx([c], (0.3, qs[0])); + Controlled Ry([c], (0.4, qs[1])); + Controlled Rz([c], (0.5, qs[2])); + //Controlled SWAP([c], (qs[0], qs[2])); + Controlled S([c], (qs[1])); + Controlled T([c], (qs[2])); + } + + use cc = Qubit[2] { + Controlled X(cc, (qs[0])); + Controlled Y(cc, (qs[0])); + Controlled Z(cc, (qs[1])); + Controlled H(cc, (qs[1])); + Controlled Rx(cc, (0.3, qs[0])); + Controlled Ry(cc, (0.4, qs[1])); + Controlled Rz(cc, (0.5, qs[2])); + //Controlled SWAP(cc, (qs[0], qs[2])); + Controlled S(cc, (qs[1])); + Controlled T(cc, (qs[2])); } } } diff --git a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs index fab708e4cd4..7c4aab5eea1 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs @@ -1,35 +1,27 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
-namespace Microsoft.Quantum.Testing.Tracer -{ +namespace Microsoft.Quantum.Testing.Tracer { open Microsoft.Quantum.Intrinsic; - operation Fixup(qs : Qubit[]) : Unit - { - for i in 0..Length(qs)-1 - { + operation Fixup(qs : Qubit[]) : Unit { + for i in 0..Length(qs)-1 { X(qs[i]); } } - operation TestMeasurements(compare : Bool) : Unit - { - use qs = Qubit[3] - { - let r0 = M(qs[0]); - let qs12 = [qs[1], qs[2]]; - let r12 = Measure([PauliY, PauliX], qs12); + operation TestMeasurements(compare : Bool) : Unit { + use qs = Qubit[3]; + let r0 = M(qs[0]); + let qs12 = [qs[1], qs[2]]; + let r12 = Measure([PauliY, PauliX], qs12); - if (compare) - { - if r0 == Zero - { - X(qs[1]); - } - - //ApplyIfOne(r12, (Fixup, qs12)); + if compare { + if r0 == Zero { + X(qs[1]); } + + //ApplyIfOne(r12, (Fixup, qs12)); } } } \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index da389210063..d721f750c9b 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -3,35 +3,27 @@ namespace Microsoft.Quantum.Instructions { - open Microsoft.Quantum.Targeting; - - @TargetInstruction("single_qubit_op") operation single_qubit_op(op_id: Int, duration: Int, qb : Qubit) : Unit { body intrinsic; } - @TargetInstruction("multi_qubit_op") operation multi_qubit_op(op_id: Int, duration: Int, qbs : Qubit[]) : Unit { body intrinsic; } - @TargetInstruction("single_qubit_op_ctl") operation single_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit { body intrinsic; } - @TargetInstruction("multi_qubit_op_ctl") operation multi_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { body intrinsic; } - @TargetInstruction("single_qubit_measure") operation single_qubit_measure(op_id: Int, duration: Int, qb : Qubit) : Result { body intrinsic; } - @TargetInstruction("joint_measure") - operation multi_qubit_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { + operation joint_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { body intrinsic; } } @@ -47,38 +39,27 @@ namespace Microsoft.Quantum.Intrinsic { operation X(qb : Qubit) : Unit is Adj + Ctl { body (...) { Phys.single_qubit_op(0, 1, qb); } - adjoint (...) { Phys.single_qubit_op(0, 1, qb); } - controlled (ctls, ...) - { - if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } - else { Phys.single_qubit_op_ctl(2, 1, ctls, qb); } - } - controlled adjoint (ctls, ...) - { - if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } + adjoint self; + controlled (ctls, ...) { + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } else { Phys.single_qubit_op_ctl(2, 1, ctls, qb); } } } operation CNOT(control : Qubit, target : Qubit) : Unit - is Adj { + is Adj + Ctl { body (...) { Controlled X([control], target); } - adjoint (...) { Controlled X([control], target); } + adjoint self; + controlled (ctls, ...) { Controlled X(ctls + control, target); } } @Inline() operation Y(qb : Qubit) : Unit is Adj + Ctl{ body (...) { Phys.single_qubit_op(3, 1, qb); } - adjoint (...) { Phys.single_qubit_op(3, 1, qb); } - controlled (ctls, ...) - { - if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } - else { Phys.single_qubit_op_ctl(5, 1, ctls, qb); } - } - controlled adjoint (ctls, ...) - { - if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } + adjoint self; + controlled (ctls, ...) 
{ + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } else { Phys.single_qubit_op_ctl(5, 1, ctls, qb); } } } @@ -87,15 +68,9 @@ namespace Microsoft.Quantum.Intrinsic { operation Z(qb : Qubit) : Unit is Adj + Ctl { body (...) { Phys.single_qubit_op(6, 1, qb); } - adjoint (...) { Phys.single_qubit_op(6, 1, qb); } - controlled (ctls, ...) - { - if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } - else { Phys.single_qubit_op_ctl(8, 1, ctls, qb); } - } - controlled adjoint (ctls, ...) - { - if (Length(ctls) == 1) { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } + adjoint self; + controlled (ctls, ...) { + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } else { Phys.single_qubit_op_ctl(8, 1, ctls, qb); } } } @@ -104,15 +79,8 @@ namespace Microsoft.Quantum.Intrinsic { operation H(qb : Qubit) : Unit is Adj + Ctl { body (...) { Phys.single_qubit_op(9, 1, qb); } - adjoint (...) { Phys.single_qubit_op(9, 1, qb); } - controlled (ctls, ...) - { - Phys.single_qubit_op_ctl(10, 1, ctls, qb); - } - controlled adjoint (ctls, ...) - { - Phys.single_qubit_op_ctl(10, 1, ctls, qb); - } + adjoint self; + controlled (ctls, ...) { Phys.single_qubit_op_ctl(10, 1, ctls, qb); } } @Inline() @@ -120,14 +88,8 @@ namespace Microsoft.Quantum.Intrinsic { is Adj + Ctl { body (...) { Phys.single_qubit_op(11, 1, qb); } adjoint (...) { Phys.single_qubit_op(11, 1, qb); } - controlled (ctls, ...) - { - Phys.single_qubit_op_ctl(12, 1, ctls, qb); - } - controlled adjoint (ctls, ...) - { - Phys.single_qubit_op_ctl(12, 1, ctls, qb); - } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(12, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(12, 1, ctls, qb); } } @Inline() @@ -135,21 +97,15 @@ namespace Microsoft.Quantum.Intrinsic { is Adj + Ctl { body (...) { Phys.single_qubit_op(13, 1, qb); } adjoint (...) { Phys.single_qubit_op(13, 1, qb); } - controlled (ctls, ...) - { - Phys.single_qubit_op_ctl(14, 1, ctls, qb); - } - controlled adjoint (ctls, ...) - { - Phys.single_qubit_op_ctl(14, 1, ctls, qb); - } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(14, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(14, 1, ctls, qb); } } @Inline() operation T(qb : Qubit) : Unit is Adj + Ctl { body (...) { Tz(qb); } - adjoint (...) { Tz(qb); } + adjoint (...) { Adjoint Tz(qb); } controlled (ctls, ...) { Controlled Tz(ctls, qb); } controlled adjoint (ctls, ...) { Controlled Adjoint Tz(ctls, qb); } } @@ -176,7 +132,7 @@ namespace Microsoft.Quantum.Intrinsic { operation S(qb : Qubit) : Unit is Adj + Ctl { body (...) { Sz(qb); } - adjoint (...) { Sz(qb); } + adjoint (...) { Adjoint Sz(qb); } controlled (ctls, ...) { Controlled Sz(ctls, qb); } controlled adjoint (ctls, ...) { Controlled Adjoint Sz(ctls, qb); } } @@ -225,55 +181,51 @@ namespace Microsoft.Quantum.Intrinsic { @Inline() operation Mzz(qubits : Qubit[]) : Result { - body (...) { return Phys.multi_qubit_measure(102, 1, qubits); } + body (...) { return Phys.joint_measure(102, 1, qubits); } } @Inline() operation Mxz(qubits : Qubit[]) : Result { - body (...) { return Phys.multi_qubit_measure(103, 1, qubits); } + body (...) { return Phys.joint_measure(103, 1, qubits); } } @Inline() operation Mzx(qubits : Qubit[]) : Result { - body (...) { return Phys.multi_qubit_measure(104, 1, qubits); } + body (...) { return Phys.joint_measure(104, 1, qubits); } } @Inline() operation Mxx(qubits : Qubit[]) : Result { - body (...) { return Phys.multi_qubit_measure(105, 1, qubits); } + body (...) 
{ return Phys.joint_measure(105, 1, qubits); } } @Inline() operation Measure(paulis : Pauli[], qubits : Qubit[]) : Result { - body (...) - { + body (...) { mutable res = One; mutable haveY = false; // Measurements that involve PauliY or PauliI - for i in 0..Length(paulis)-1 - { - if (paulis[i] == PauliY or paulis[i] == PauliI) - { + for i in 0..Length(paulis)-1 { + if paulis[i] == PauliY or paulis[i] == PauliI { set haveY = true; } } - if (haveY) { set res = Phys.multi_qubit_measure(106, 1, qubits); } + if haveY { set res = Phys.joint_measure(106, 1, qubits); } // More than two qubits (but no PauliY or PauliI) - elif (Length(paulis) > 2) { set res = Phys.multi_qubit_measure(107, 1, qubits); } + elif Length(paulis) > 2 { set res = Phys.joint_measure(107, 1, qubits); } // Single qubit measurement -- differentiate between Mx and Mz - elif (Length(paulis) == 1) - { + elif Length(paulis) == 1 { if (paulis[0] == PauliX) { set res = Mx(qubits[0]); } else { set res = Mz(qubits[0]); } } // Specialize for two-qubit measurements: Mxx, Mxz, Mzx, Mzz - elif (paulis[0] == PauliX and paulis[1] == PauliX) { set res = Phys.multi_qubit_measure(108, 1, qubits); } - elif (paulis[0] == PauliX and paulis[1] == PauliZ) { set res = Phys.multi_qubit_measure(109, 1, qubits); } - elif (paulis[0] == PauliZ and paulis[1] == PauliX) { set res = Phys.multi_qubit_measure(110, 1, qubits); } - elif (paulis[0] == PauliZ and paulis[1] == PauliZ) { set res = Phys.multi_qubit_measure(111, 1, qubits); } + elif paulis[0] == PauliX and paulis[1] == PauliX { set res = Phys.joint_measure(108, 1, qubits); } + elif paulis[0] == PauliX and paulis[1] == PauliZ { set res = Phys.joint_measure(109, 1, qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliX { set res = Phys.joint_measure(110, 1, qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliZ { set res = Phys.joint_measure(111, 1, qubits); } //shouldn't get here return res; @@ -282,8 +234,7 @@ namespace Microsoft.Quantum.Intrinsic { @TargetInstruction("inject_global_barrier") - operation Barrier(id : Int, duration : Int) : Unit - { + operation Barrier(id : Int, duration : Int) : Unit { body intrinsic; } From 5752c95b459fcf1cf758f8e9398f1b2cac8ca61f Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Wed, 17 Feb 2021 11:24:21 -0800 Subject: [PATCH 22/27] layering example diagram --- src/QirRuntime/lib/Tracer/README.md | 15 ++++++++++++--- src/QirRuntime/lib/Tracer/layering_example.png | Bin 0 -> 26209 bytes 2 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 src/QirRuntime/lib/Tracer/layering_example.png diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 753ec5db260..781365d80ef 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -64,7 +64,7 @@ A user will be able to inject global barriers by calling `__quantum__qis__global duration of a barrier which would affect start time of the following layers but no operations will be added to a barrier, independent of its width. -__The conceptual algorithm__: +#### The conceptual algorithm #### 1. The tracer must be set the preferred layer duration: P. 1. The first encountered operation of __non-zero__ duration N is added into layer L(0, max(P,N)). The value @@ -78,8 +78,7 @@ __The conceptual algorithm__: duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations). 
Starting at L(k, Nk) and scanning backwards to L(_conditional barrier_, Nb) find the _first_ layer that contains an operation that acts on the qubit of _op_. Add _op_ into this layer. If no such layer is found, add _op_ to the list of - pending operations on the qubit. At the end of the program commit all pending operations of duration zero into a new - layer. + pending operations on the qubit. At the end of the program, operations that are still pending are ignored. 1. Suppose there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is _op_ of duration _N > 0_ or it involves more than one qubit. Starting at L(k, Nk) and scanning backwards to L(_conditional barrier_, Nb) find the _last_ layer L(t, Nt) such that Qubits(t, Nt) don't contain any of the _op_'s qubits and find the _first_ layer L(w, Nw) such that Qubits(w, Nw) contains some of _op_'s qubits but Nw + N <= P. Add _op_ into one of the two layers with the later time. If neither such layer is found, add _op_ into a new layer L(k+1, max(P, N)). Add the pending operations of all involved qubits into the same layer and clear the pending lists. +#### Example of layering #### + +The diagram below shows an example of how a sequential program, represented by the left circuit, would be layered by the + algorithm above. The gates in light gray are of duration zero, the preferred layer duration is 1, and the barrier, + represented by a vertical squiggle, is set to have duration 0. + +![layering example](layering_example.png?raw=true "Layering example diagram") + +Notice that gate 9 is dropped because it cannot cross the barrier to be added into L(2,1). + ## Special handling of SWAP ## The tracer will provide a way to handle SWAP as, effectively, renaming of the involved qubits. The users will have the
diff --git a/src/QirRuntime/lib/Tracer/layering_example.png b/src/QirRuntime/lib/Tracer/layering_example.png new file mode 100644 index 0000000000000000000000000000000000000000..5713843d8598169f80720dffc2b9fb714ab61527 GIT binary patch literal 26209 [26209 bytes of binary image data for layering_example.png elided]
From 7f0c470cd8cf6dd817a8b0d5377e2b3b2ad3d4eb Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Thu, 18 Feb 2021 08:50:20 -0800 Subject: [PATCH 23/27] Finally addressed all CR feedback!
--- src/QirRuntime/lib/Tracer/README.md | 87 +- src/QirRuntime/lib/Tracer/tracer.cpp | 10 +- src/QirRuntime/lib/Tracer/tracer.hpp | 32 +- src/QirRuntime/public/TracerTypes.hpp | 17 + src/QirRuntime/test.py | 136 +- src/QirRuntime/test/QIR-static/qir-gen.ll | 2278 ++++++++--------- src/QirRuntime/test/QIR-tracer/generate.py | 42 +- .../test/QIR-tracer/tracer-config.cpp | 4 +- .../test/QIR-tracer/tracer-config.hpp | 12 +- .../test/QIR-tracer/tracer-intrinsics.qs | 3 +- .../test/QIR-tracer/tracer-target.qs | 173 +- 11 files changed, 1404 insertions(+), 1390 deletions(-) create mode 100644 src/QirRuntime/public/TracerTypes.hpp
diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 781365d80ef..7e7e6f646ea 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -30,7 +30,22 @@ The Resource Tracer will consist of: ## Layering ## -_Definition_: ___Time___ is an integer-valued function on all quantum operations in a program (gates, measurements, +One of the goals of the tracer is to compute which of the quantum operations can be executed in parallel. Further in + this section we provide the definitions of the concepts used and a description of how we group the operations into + _layers_; however, we hope that the following example of layering is intuitively clear. + +### Example of layering ### + +The diagram below shows an example of how a sequential program, represented by the left circuit, could be layered. The gates in light gray are of duration zero, the preferred layer duration is 1, and the barrier, + represented by a vertical squiggle, is set to have duration 0. + +![layering example](layering_example.png?raw=true "Layering example diagram") + +Notice that gate 9 is dropped because it cannot cross the barrier to be added into L(2,1). + +### Definitions ### + +___Time___ is an integer-valued function on all quantum operations in a program (gates, measurements, qubits allocation/release). For each gate there are start and end times. For each qubit, there are times when the qubit is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits the gate uses. If two gates or measurements use the same qubit, one of the gates must have start time greater than or equal to the end @@ -41,7 +56,7 @@ A sequentially executed quantum program can be assigned a trivial time function, assuming that some operations might be executed simultaneously while allowing for different operations to have various durations. -_Definition_: Provided a valid _time_ function for the program a ___layer of duration N at time T, denoted as L(T,N),___ +Provided a valid _time_ function for the program, a ___layer of duration N at time T, denoted as L(T,N),___ is a subset of operations in the program such that all of these operations have start time greater than or equal to _T_ and finish time less than _T + N_. The program is ___layered___ if all gates in it are partitioned into layers that don't overlap in time. The union of all qubits that are involved in operations of a given layer will be denoted _Qubits(T,N)_. @@ -52,49 +67,53 @@ A sequential program can be trivially layered such that each layer contains exac zero time to execute, those gates can be added to a layer even if they act on the same qubit another gate in this layer is acting on and have to be executed sequentially within the layer. +_Definition_: A ___barrier___ is a layer that no operations can be added into.
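These definitions map naturally onto the tracer's bookkeeping. The sketch below paraphrases the data shapes visible in `tracer.hpp` and the new `TracerTypes.hpp` from this patch; the exact aliases, field types, and container choices here are assumptions.

```cpp
#include <cstdint>
#include <map>
#include <vector>

using OpId = int32_t;    // operation id, chosen by the user
using Time = int32_t;    // the integer-valued time function
using Duration = int32_t;
using LayerId = size_t;  // index into the tracer's vector of layers

struct Layer
{
    Time startTime;      // T in L(T,N)
    Duration duration;   // N in L(T,N)
    OpId barrierId = -1; // set when this layer is a barrier (no ops may be added)
    std::map<OpId, int> operations; // per-operation counts within the layer
};

struct QubitState
{
    LayerId layer;     // the last layer this qubit was used in
    Time lastUsedTime; // end time of the qubit's last operation
    std::vector<OpId> pendingZeroDurationOps; // zero-duration ops waiting for a host layer
};
```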
+ + ### The Resource Tracer's Layering Algorithm ### As the tracer is executing a sequential quantum program, it will compute a time function and corresponding layering using the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might be done differently, as long as the resulting layering is the same as if running the conceptual algorithm. -_Definition_: A ___barrier___ is a layer that no more operations can be added into. +A user can inject _barriers_ by calling the `__quantum__qis__global_barrier` function. The user can choose the duration of + a barrier, which affects the start time of the following layers, but no operations will be added to a barrier, + independent of its duration. -A user will be able to inject global barriers by calling `__quantum__qis__global_barrier` function. The user can choose - duration of a barrier which would affect start time of the following layers but no operations will be added to a barrier, - independent of its width. +__Conditional execution on measurement results__: The Tracer will execute LLVM IR's branching structures "as is", + depending on the values of the corresponding variables at runtime. To enable estimation of branches that depend on a + measurement result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the + conditionals into corresponding callbacks to the tracer. The tracer will add operations from _both branches_ into the + layers it creates to compute the upper bound estimate. + +Conditionals, measurements, and operations that open frames are _not_ supported inside conditional callbacks. + +__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated + computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules + are treated as barriers. #### The conceptual algorithm #### -1. The tracer must be set the preferred layer duration: P. -1. The first encountered operation of __non-zero__ duration N is added into layer L(0, max(P,N)). The value +Note: The tracer assumes that the preferred layer duration is _P_. + +1. The first encountered operation of __non-zero__ duration _N_ is added into layer _L(0, max(P,N))_. The value of the _conditional barrier_ variable on the tracer is set to 0. -1. When conditional callback is encountered, the layer L(t,N) of the measurement that produced the result the conditional +1. When a conditional callback is encountered, the layer _L(t,N)_ of the measurement that produced the result the conditional is dependent on, is looked up and the _conditional barrier_ is set to _t + N_. At the end of the conditional scope _conditional barrier_ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen before or in the same layer as the measurement, even if they don't involve the measured qubits.) TODO: is it OK for later operations to be added to the layers with ops _inside_ conditional branches? -1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is a single-qubit _op_ of +1. Suppose there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is a single-qubit _op_ of duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations).
- Starting at L(k, Nk) and scanning backwards to L(_conditional barrier_, Nb) find the _first_ layer that contains an + Starting at _L(k, Nk)_ and scanning backwards to _L(conditional barrier, Nb)_ find the _first_ layer that contains an operation that acts on the qubit of _op_. Add _op_ into this layer. If no such layer is found, add _op_ to the list of pending operations on the qubit. At the end of the program still pending operations are ignored. -1. Suppose, there are already layers L(0,N0), ... , L(k,Nk) and the operation being executed is _op_ of duration _N > 0_ - or it involves more than one qubit. Starting at L(k, Nk) and scanning backwards to L(_conditional barrier_, Nb) find the - _last_ layer L(t, Nt) such that Qubits(t, Nt) don't contain any of the _op_'s qubits and find the _first_ layer L(w, Nw) - such that Qubits(w, Nw) contains some of _op_'s qubits but Nw + N <= P. Add _op_ into one of the two layer with later - time. If neither such layers is found, add _op_ into a new layer L(k+1, max(P, N)). Add the pending operations of all - involved qubits into the same layer and clear the pending lists. - -#### Example of layering #### - -The diagram below shows an example of how a sequential program, represented by the left circuit, would be layered by the - algorithm above. The gates in light gray are of duration zero, the preferrred layer duration is 1, and the barrier, - represented by a vertical squiggle, is set to have duration 0. - -![layering example](layering_example.png?raw=true "Layering example diagram") - -Notice, that gate 9 is dropped because it cannot cross the barrier to be added into L(2,1). +1. Suppose, there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is _op_ of duration _N > 0_ + or it involves more than one qubit. Starting at _L(k, Nk)_ and scanning backwards to _L(conditional barrier, Nb)_ find + the _last_ layer _L(t, Nt)_ such that _Qubits(t, Nt)_ don't contain any of the _op_'s qubits and find the _first_ layer + _L(w, Nw)_ such that Qubits(w, Nw) contains some of _op_'s qubits but Nw + N <= P. Add _op_ into one of the two layer + with later time. If neither such layers is found, add _op_ into a new layer _L(k+Nk, max(P, N))_. Add the pending + operations of all involved qubits into the same layer and clear the pending lists. ## Special handling of SWAP ## @@ -104,7 +123,7 @@ The tracer will provide a way to handle SWAP as, effectively, renaming of the in ## Frame tracking ## A user might want to count differently operations that are applied in a different state. For example, if Hadamard gate - is applied to a qubit and then Rz and Mx gates, a user might want to count the sequence as if Rz as Mz were executed. + is applied to a qubit and then Rz gate, a user might want to count it as if Rz were executed instead. The frame is closed when the state of the qubit is reset (in Hadamard's case, another Hadamard operator is applied to the qubit). The user will be able to register the required frame tracking with the tracer via a C++ registration callback. @@ -172,15 +191,3 @@ _Note on mapping Q# intrinsics to the methods above_: Q# compiler will support T this project (`tracer-target.qs` specifies the mapping). The Resource Tracer will reuse qir-rt library while implementing the qis methods specified above. - -__Conditionals on measurements__: The Resource Tracer will execute LLVM IR's branching structures "as is", depending on - the values of the corresponding variables at runtime. 
To enable estimation of branches that depend on a measurement - result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the - conditionals into `__quantum__qis__apply_if*` calls. The tracer will add operations from _both branches_ into the - layers it creates to compute the upper bound estimate. - -Nested conditionals, conditional measurements and conditional tracked operations will _not_ be supported. - -__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated - computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules - are treated as layering barriers. \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp index 99ea765a1d0..c9e906f4a29 100644 --- a/src/QirRuntime/lib/Tracer/tracer.cpp +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -47,7 +47,7 @@ namespace Quantum const QubitState& qstate = this->UseQubit(q); stringstream str(std::to_string(qubitIndex)); - str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroOps.size() << ")"; + str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroDurationOps.size() << ")"; return str.str(); } @@ -81,7 +81,7 @@ namespace Quantum layerStartTime = lastLayer.startTime + lastLayer.duration; } this->metricsByLayer.emplace_back( - Layer{max(this->preferredLayerDuration, minRequiredDuration), layerStartTime}); + Layer {layerStartTime, max(this->preferredLayerDuration, minRequiredDuration)}); return this->metricsByLayer.size() - 1; } @@ -146,7 +146,7 @@ namespace Quantum void CTracer::UpdateQubitState(Qubit q, LayerId layer, Duration opDuration) { QubitState& qstate = this->UseQubit(q); - for (OpId idPending : qstate.pendingZeroOps) + for (OpId idPending : qstate.pendingZeroDurationOps) { this->AddOperationToLayer(idPending, layer); } @@ -155,7 +155,7 @@ namespace Quantum qstate.layer = layer; const Time layerStart = this->metricsByLayer[layer].startTime; qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration; - qstate.pendingZeroOps.clear(); + qstate.pendingZeroDurationOps.clear(); } //------------------------------------------------------------------------------------------------------------------ @@ -169,7 +169,7 @@ namespace Quantum if (opDuration == 0 && (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier))) { - qstate.pendingZeroOps.push_back(id); + qstate.pendingZeroDurationOps.push_back(id); return INVALID; } diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp index fcdb6453d49..c5548e4bd18 100644 --- a/src/QirRuntime/lib/Tracer/tracer.hpp +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
+#pragma once
-#include <limits>
 #include
 #include
 #include
@@ -9,39 +9,33 @@
 #include
 #include "CoreTypes.hpp"
+#include "TracerTypes.hpp"
 #include "QuantumApi_I.hpp"

 namespace Microsoft
 {
 namespace Quantum
 {
-    using OpId = int;
-    using Time = int;
-    using Duration = int;
-    using LayerId = size_t;
-
-    constexpr LayerId INVALID = std::numeric_limits<LayerId>::max();
-
     /*==================================================================================================================
         Layer
     ==================================================================================================================*/
     struct Layer
     {
-        // Width of the layer on the time axis.
-        const Duration duration;
-
         // Start time of the layer.
         const Time startTime;
+        // Width of the layer on the time axis.
+        const Duration duration;
+
         // Quantum operations, assigned to this layer.
         std::unordered_map operations;

         // Optional id, if the layer represents a global barrier.
         OpId barrierId = -1;

-        Layer(Duration duration, Time startTime)
-            : duration(duration)
-            , startTime(startTime)
+        Layer(Time startTime, Duration duration)
+            : startTime(startTime)
+            , duration(duration)
         {
         }
     };
@@ -55,14 +49,12 @@
        // operations of non-zero duration.
        LayerId layer = INVALID;

-        // For layers with duration greater than one, multiple operations might fit on the same qubit, if the operations
-        // are short. `lastUsedTime` is the end time of the last operation, the qubit participated it.
+        // `lastUsedTime` stores the end time of the last operation the qubit participated in. It might not match the
+        // end time of the layer if the duration of the last operation is less than the duration of the layer. Tracking
+        // this time allows us to possibly fit multiple short operations on the same qubit into a single layer.
         Time lastUsedTime = 0;

-        std::vector<OpId> pendingZeroOps;
-
-        // For now assume that only one kind of frame can be tracked.
-        bool isFrameOpen = false;
+        std::vector<OpId> pendingZeroDurationOps;
     };

     /*==================================================================================================================
diff --git a/src/QirRuntime/public/TracerTypes.hpp b/src/QirRuntime/public/TracerTypes.hpp
new file mode 100644
index 00000000000..9b7d242f9f7
--- /dev/null
+++ b/src/QirRuntime/public/TracerTypes.hpp
@@ -0,0 +1,17 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+#pragma once
+
+#include <limits>
+
+namespace Microsoft
+{
+namespace Quantum
+{
+    using OpId = int;
+    using Time = int;
+    using Duration = int;
+    using LayerId = size_t;
+
+    constexpr LayerId INVALID = std::numeric_limits<LayerId>::max();
+}
+}
\ No newline at end of file
diff --git a/src/QirRuntime/test.py b/src/QirRuntime/test.py
index 268835e38d2..869eea365cb 100644
--- a/src/QirRuntime/test.py
+++ b/src/QirRuntime/test.py
@@ -20,81 +20,83 @@ def log(message):
     print(current_time + ": " + message)

 # =============================================================================
-root_dir = os.path.dirname(os.path.abspath(__file__))
+if __name__ == '__main__':
+    # this script is being executed directly, not imported
+    root_dir = os.path.dirname(os.path.abspath(__file__))

-# parameters
-flavor = "Debug"
-nobuild = False
-noqirgen = False
-for arg in sys.argv:
-    arg = arg.lower()
-    if arg == "test.py":
-        continue
-    elif arg == "debug":
-        flavor = "Debug"
-    elif arg == "release":
-        flavor = "Release"
-    elif arg == "nobuild":
-        nobuild = True
-        noqirgen = True
-    elif arg == "noqirgen":
-        noqirgen = True
-    else:
-        log("unrecognized argument: " + arg)
-        sys.exit()
+    # parameters
+    flavor = "Debug"
+    nobuild = False
+    noqirgen = False
+    for arg in sys.argv:
+        arg = arg.lower()
+        if arg == "test.py":
+            continue
+        elif arg == "debug":
+            flavor = "Debug"
+        elif arg == "release":
+            flavor = "Release"
+        elif arg == "nobuild":
+            nobuild = True
+            noqirgen = True
+        elif arg == "noqirgen":
+            noqirgen = True
+        else:
+            log("unrecognized argument: " + arg)
+            sys.exit()

-if not noqirgen:
-    if generateqir.do_generate_all(root_dir) != 0:
-        log("build failed to generate QIR => won't execute the tests")
-        log("to execute the tests from the last successful build run `test.py nobuild`")
-        sys.exit()
+    if not noqirgen:
+        if generateqir.do_generate_all(root_dir) != 0:
+            log("build failed to generate QIR => won't execute the tests")
+            log("to execute the tests from the last successful build run `test.py nobuild`")
+            sys.exit()

-if not nobuild:
-    result = build.do_build(root_dir, True, True, flavor) # should_make, should_build
-    if result.returncode != 0:
-        log("build failed with exit code {0} => won't execute the tests".format(result.returncode))
-        log("to execute the tests from the last successful build run `test.py nobuild`")
-        sys.exit()
+    if not nobuild:
+        result = build.do_build(root_dir, True, True, flavor) # should_make, should_build
+        if result.returncode != 0:
+            log("build failed with exit code {0} => won't execute the tests".format(result.returncode))
+            log("to execute the tests from the last successful build run `test.py nobuild`")
+            sys.exit()

-install_dir = os.path.join(root_dir, "build", platform.system(), flavor, "bin")
-if not os.path.isdir(install_dir):
-    log("please build first: 'build.py [debug|release] [ir]'")
-    sys.exit()
+    install_dir = os.path.join(root_dir, "build", platform.system(), flavor, "bin")
+    if not os.path.isdir(install_dir):
+        log("please build first: 'build.py [debug|release] [ir]'")
+        sys.exit()

-print("\n")
+    print("\n")

-# Configure DLL lookup locations to include full state simulator and qdk
-exe_ext = ""
-fullstate_sim_dir = os.path.join(root_dir, "..", "Simulation", "Native", "build", flavor)
-if platform.system() == "Windows":
-    exe_ext = ".exe"
-    os.environ['PATH'] = os.environ['PATH'] + ";" + fullstate_sim_dir + ";" + install_dir
-else:
-    # add the folder to the list of locations to load libraries from
-    old = os.environ.get("LD_LIBRARY_PATH")
-    if old:
-        os.environ["LD_LIBRARY_PATH"] = old + ":" + 
fullstate_sim_dir + ":" + install_dir + # Configure DLL lookup locations to include full state simulator and qdk + exe_ext = "" + fullstate_sim_dir = os.path.join(root_dir, "..", "Simulation", "Native", "build", flavor) + if platform.system() == "Windows": + exe_ext = ".exe" + os.environ['PATH'] = os.environ['PATH'] + ";" + fullstate_sim_dir + ";" + install_dir else: - os.environ["LD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir + # add the folder to the list of locations to load libraries from + old = os.environ.get("LD_LIBRARY_PATH") + if old: + os.environ["LD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + else: + os.environ["LD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir - old = os.environ.get("DYLD_LIBRARY_PATH") - if old: - os.environ["DYLD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir - else: - os.environ["DYLD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir + old = os.environ.get("DYLD_LIBRARY_PATH") + if old: + os.environ["DYLD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + else: + os.environ["DYLD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir -log("========= Running native tests =========") -test_binaries = [ - "fullstate-simulator-tests", - "qir-runtime-unittests", - "qir-static-tests", - "qir-dynamic-tests", - "qir-tracer-tests" -] + log("========= Running native tests =========") + test_binaries = [ + "fullstate-simulator-tests", + "qir-runtime-unittests", + "qir-static-tests", + "qir-dynamic-tests", + "qir-tracer-tests" + ] -for name in test_binaries: - test_binary = os.path.join(install_dir, name + exe_ext) - log(test_binary) - subprocess.run(test_binary + " ~[skip]", shell = True) + for name in test_binaries: + test_binary = os.path.join(install_dir, name + exe_ext) + log(test_binary) + subprocess.run(test_binary + " ~[skip]", shell = True) -print("\n") \ No newline at end of file + print("\n") \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-static/qir-gen.ll b/src/QirRuntime/test/QIR-static/qir-gen.ll index 4a41d3df9e7..c94d1e6d972 100644 --- a/src/QirRuntime/test/QIR-static/qir-gen.ll +++ b/src/QirRuntime/test/QIR-static/qir-gen.ll @@ -2,10 +2,10 @@ %Result = type opaque %Range = type { i64, i64, i64 } %Tuple = type opaque +%Array = type opaque %Callable = type opaque %Qubit = type opaque %String = type opaque -%Array = type opaque @ResultZero = external global %Result* @ResultOne = external global %Result* @@ -14,880 +14,298 @@ @PauliY = constant i2 -1 @PauliZ = constant i2 -2 @EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } -@Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] -@PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] -@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* 
@MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] -@0 = internal constant [14 x i8] c"error code: 1\00" -@1 = internal constant [14 x i8] c"error code: 2\00" -@2 = internal constant [14 x i8] c"error code: 3\00" -@3 = internal constant [14 x i8] c"error code: 2\00" -@4 = internal constant [14 x i8] c"error code: 5\00" -@5 = internal constant [14 x i8] c"error code: 6\00" -@6 = internal constant [14 x i8] c"error code: 7\00" -@7 = internal constant [30 x i8] c"Unexpected measurement result\00" +@0 = internal constant [30 x i8] c"Unexpected measurement result\00" @Microsoft__Quantum__Testing__QIR__Subtract = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__1 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__1__RefCount, void (%Tuple*, i64)* @MemoryManagement__1__AliasCount] +@Microsoft__Quantum__Testing__QIR__Qop = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper] +@PartialApplication__2 = constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctladj__wrapper] @MemoryManagement__2 = constant [2 x void (%Tuple*, i64)*] [void (%Tuple*, i64)* @MemoryManagement__2__RefCount, void (%Tuple*, i64)* @MemoryManagement__2__AliasCount] +@1 = internal constant [14 x i8] c"error code: 1\00" +@2 = internal constant [14 x i8] c"error code: 2\00" +@3 = internal constant [14 x i8] c"error code: 3\00" +@4 = internal constant [14 x i8] c"error code: 2\00" +@5 = internal constant [14 x i8] c"error code: 5\00" +@6 = internal constant [14 x i8] c"error code: 6\00" +@7 = internal constant [14 x i8] c"error code: 7\00" @8 = internal constant [20 x i8] c"Pauli value: PauliI\00" @9 = internal constant [14 x i8] c"Pauli value: \00" @10 = internal constant [7 x i8] c"PauliX\00" @11 = internal constant [7 x i8] c"PauliY\00" @12 = internal constant [7 x i8] c"PauliZ\00" -define void @Microsoft__Quantum__Testing__QIR__TestControlled__body() { +define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() { entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 
}, { %Callable*, i64 }* null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* - %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 - %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Qop, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) - store %Callable* %4, %Callable** %2 - store i64 1, i64* %3 - %qop = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %0) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 1) - %adj_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 1) - %ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 1) - %adj_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %adj_ctl_qop) - call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 1) - %ctl_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_ctl_qop) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 1) - %q1 = call %Qubit* @__quantum__rt__qubit_allocate() - %q2 = call %Qubit* @__quantum__rt__qubit_allocate() - %q3 = call %Qubit* @__quantum__rt__qubit_allocate() - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %6 = bitcast %Tuple* %5 to { %Qubit* }* - %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 - store %Qubit* %q1, %Qubit** %7 - call void @__quantum__rt__callable_invoke(%Callable* %qop, %Tuple* %5, %Tuple* null) - %8 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1) - %9 = load %Result*, %Result** @ResultOne - %10 = call i1 @__quantum__rt__result_equal(%Result* %8, %Result* %9) - %11 = xor i1 %10, true - br i1 %11, label %then0__1, label %continue__1 + %0 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00) + %1 = 
fcmp one double 0.000000e+00, %0 + br i1 %1, label %then0__1, label %continue__1 then0__1: ; preds = %entry - %12 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @0, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %12) - unreachable + ret i64 1 continue__1: ; preds = %entry - %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %14 = bitcast %Tuple* %13 to { %Qubit* }* - %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 - store %Qubit* %q2, %Qubit** %15 - call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %13, %Tuple* null) - %16 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) - %17 = load %Result*, %Result** @ResultOne - %18 = call i1 @__quantum__rt__result_equal(%Result* %16, %Result* %17) - %19 = xor i1 %18, true - br i1 %19, label %then0__2, label %continue__2 + %2 = call double @Microsoft__Quantum__Math__PI__body() + %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00) + %4 = fcmp one double %2, %3 + br i1 %4, label %then0__2, label %continue__2 then0__2: ; preds = %continue__1 - %20 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void 
@__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %20) - unreachable + ret i64 2 continue__2: ; preds = %continue__1 - %21 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %22 = bitcast %Tuple* %21 to { %Array*, %Qubit* }* - %23 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 0 - %24 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 1 - %25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) - %27 = bitcast i8* %26 to %Qubit** - store %Qubit* %q1, %Qubit** %27 - store %Array* %25, %Array** %23 - store %Qubit* %q3, %Qubit** %24 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %21, %Tuple* null) - %28 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %29 = load %Result*, %Result** @ResultOne - %30 = call i1 @__quantum__rt__result_equal(%Result* %28, %Result* %29) - %31 = xor i1 %30, true - br i1 %31, label %then0__3, label %continue__3 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fdiv double %5, 2.000000e+00 + %7 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00) + %8 = fcmp one double %6, %7 + br 
i1 %8, label %then0__3, label %continue__3 then0__3: ; preds = %continue__2 - %32 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %32) - unreachable + ret i64 3 continue__3: ; preds = %continue__2 - %33 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %34 = bitcast %Tuple* %33 to { %Array*, %Qubit* }* - %35 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 0 - %36 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 1 - %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) - %39 = bitcast i8* %38 to %Qubit** - store %Qubit* %q2, %Qubit** %39 - store %Array* %37, %Array** %35 - store %Qubit* %q3, %Qubit** %36 - call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, 
%Tuple* %33, %Tuple* null) - %40 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %41 = load %Result*, %Result** @ResultZero - %42 = call i1 @__quantum__rt__result_equal(%Result* %40, %Result* %41) - %43 = xor i1 %42, true - br i1 %43, label %then0__4, label %continue__4 + %9 = call double @Microsoft__Quantum__Math__PI__body() + %10 = fneg double %9 + %11 = fdiv double %10, 2.000000e+00 + %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00) + %13 = fcmp one double %11, %12 + br i1 %13, label %then0__4, label %continue__4 then0__4: ; preds = %continue__3 - %44 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @3, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %44) - 
unreachable + ret i64 4 continue__4: ; preds = %continue__3 - %45 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %46 = bitcast %Tuple* %45 to { %Array*, { %Array*, %Qubit* }* }* - %47 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 0 - %48 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 1 - %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) - %51 = bitcast i8* %50 to %Qubit** - store %Qubit* %q1, %Qubit** %51 - %52 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %53 = bitcast %Tuple* %52 to { %Array*, %Qubit* }* - %54 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 0 - %55 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 1 - %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) - %58 = bitcast i8* %57 to %Qubit** - store %Qubit* %q2, %Qubit** %58 - store %Array* %56, %Array** %54 - store %Qubit* %q3, %Qubit** %55 - store %Array* %49, %Array** %47 - store { %Array*, %Qubit* }* %53, { %Array*, %Qubit* }** %48 - call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %45, %Tuple* null) - %59 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %60 = load %Result*, %Result** @ResultOne - %61 = call i1 @__quantum__rt__result_equal(%Result* %59, %Result* %60) - %62 = xor i1 %61, true - br i1 %62, label %then0__5, label %continue__5 + %14 = call double @Microsoft__Quantum__Math__PI__body() + %15 = fdiv double %14, 4.000000e+00 + %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00) + %17 = fcmp one double %15, %16 + br i1 %17, label %then0__5, label %continue__5 then0__5: ; preds = %continue__4 - %63 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @4, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 
-1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %63) - unreachable + ret i64 5 continue__5: ; preds = %continue__4 - %64 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %64) - %65 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %66 = bitcast %Tuple* %65 to { %Array*, %Qubit* }* - %67 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 0 - %68 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 1 - %69 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) - %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) - %71 = bitcast i8* %70 to %Qubit** - %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) - %73 = bitcast i8* %72 to %Qubit** - store %Qubit* %q1, %Qubit** %71 - store %Qubit* %q2, %Qubit** %73 - store %Array* %69, %Array** %67 - store %Qubit* %q3, %Qubit** %68 - call void @__quantum__rt__callable_invoke(%Callable* %64, %Tuple* %65, %Tuple* null) - %74 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) - %75 = load %Result*, %Result** @ResultZero - %76 = call i1 @__quantum__rt__result_equal(%Result* %74, %Result* %75) - %77 = xor i1 %76, true - br i1 %77, label %then0__6, label %continue__6 + %18 = call double @Microsoft__Quantum__Math__PI__body() + %19 = fmul double %18, 3.000000e+00 + %20 = fdiv double %19, 4.000000e+00 + %21 = call double @__quantum__qis__arctan2__body(double 
1.000000e+00, double -1.000000e+00) + %22 = fcmp one double %20, %21 + br i1 %22, label %then0__6, label %continue__6 then0__6: ; preds = %continue__5 - %78 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @5, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) - call 
void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__fail(%String* %78) - unreachable + ret i64 6 continue__6: ; preds = %continue__5 - %q4 = call %Qubit* @__quantum__rt__qubit_allocate() - %79 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %79) - %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %81 = bitcast %Tuple* %80 to { %Qubit* }* - %82 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %81, i32 0, i32 0 - store %Qubit* %q3, %Qubit** %82 - call void @__quantum__rt__callable_invoke(%Callable* %79, %Tuple* %80, %Tuple* null) - %83 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %83) - call void @__quantum__rt__callable_make_adjoint(%Callable* %83) - %84 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %85 = bitcast %Tuple* %84 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* - %86 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 0 - %87 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 1 - %88 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) - %90 = bitcast i8* %89 to %Qubit** - store %Qubit* %q1, %Qubit** %90 - %91 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %92 = bitcast %Tuple* %91 to { %Array*, { %Array*, %Qubit* }* }* - %93 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 0 - %94 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 1 - %95 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0) - %97 = bitcast i8* %96 to %Qubit** - store %Qubit* %q2, %Qubit** %97 - %98 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %99 = bitcast %Tuple* %98 to { %Array*, %Qubit* }* - %100 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 0 - %101 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 1 - %102 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) - %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 0) - %104 = bitcast i8* %103 to %Qubit** - store %Qubit* %q3, %Qubit** %104 - store %Array* %102, %Array** %100 - store %Qubit* %q4, %Qubit** %101 - store %Array* %95, %Array** %93 - store { %Array*, %Qubit* }* %99, { %Array*, %Qubit* }** %94 - store %Array* %88, %Array** %86 - store { %Array*, { 
%Array*, %Qubit* }* }* %92, { %Array*, { %Array*, %Qubit* }* }** %87 - call void @__quantum__rt__callable_invoke(%Callable* %83, %Tuple* %84, %Tuple* null) - %105 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4) - %106 = load %Result*, %Result** @ResultOne - %107 = call i1 @__quantum__rt__result_equal(%Result* %105, %Result* %106) - %108 = xor i1 %107, true - br i1 %108, label %then0__7, label %continue__7 + %23 = call double @Microsoft__Quantum__Math__PI__body() + %24 = fneg double %23 + %25 = fmul double %24, 3.000000e+00 + %26 = fdiv double %25, 4.000000e+00 + %27 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double -1.000000e+00) + %28 = fcmp one double %26, %27 + br i1 %28, label %then0__7, label %continue__7 then0__7: ; preds = %continue__6 - %109 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @6, i32 0, i32 0)) - call void @__quantum__rt__qubit_release(%Qubit* %q4) - call void @__quantum__rt__qubit_release(%Qubit* %q1) - call void @__quantum__rt__qubit_release(%Qubit* %q2) - call void @__quantum__rt__qubit_release(%Qubit* %q3) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) - call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) - call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) - call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) 
-  call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
-  call void @__quantum__rt__fail(%String* %109)
-  unreachable
+  ret i64 7
 
 continue__7: ; preds = %continue__6
-  call void @__quantum__rt__qubit_release(%Qubit* %q4)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1)
-  call void @__quantum__rt__qubit_release(%Qubit* %q1)
-  call void @__quantum__rt__qubit_release(%Qubit* %q2)
-  call void @__quantum__rt__qubit_release(%Qubit* %q3)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
-  ret void
-}
+  %29 = call double @Microsoft__Quantum__Math__PI__body()
+  %30 = fneg double %29
+  %31 = fdiv double %30, 4.000000e+00
+  %32 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 1.000000e+00)
+  %33 = fcmp one double %31, %32
+  br i1 %33, label %then0__8, label %continue__8
 
-declare %Tuple* @__quantum__rt__tuple_create(i64)
+then0__8: ; preds = %continue__7
+  ret i64 8
 
-define void @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
-entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }*
-  %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0
-  %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1
-  %3 = load %Qubit*, %Qubit** %1
-  %4 = load i64, i64* %2
-  call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %3, i64 %4)
-  ret void
-}
+continue__8: ; preds = %continue__7
+  %34 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 0.000000e+00)
+  %35 = fcmp one double 0.000000e+00, %34
+  br i1 %35, label %then0__9, label %continue__9
 
-define void @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
-entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }*
-  %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0
-  %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1
-  %3 = load %Qubit*, %Qubit** %1
-  %4 = load i64, i64* %2
-  call void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %3, i64 %4)
-  ret void
-}
+then0__9: ; preds = %continue__8
+  ret i64 9
 
-define void @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
-entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }*
-  %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0
-  %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1
-  %3 = load %Array*, %Array** %1
-  %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2
-  call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %3, { %Qubit*, i64 }* %4)
-  ret void
+continue__9: ; preds = %continue__8
+  %y__9 = call double @__quantum__qis__nan__body()
+  %d = call double @__quantum__qis__arctan2__body(double %y__9, double 0.000000e+00)
+  %36 = call i1 @__quantum__qis__isnan__body(double %d)
+  %37 = xor i1 %36, true
+  br i1 %37, label %then0__10, label %continue__10
+
+then0__10: ; preds = %continue__9
+  ret i64 11
+
+continue__10: ; preds = %continue__9
+  %x__10 = call double @__quantum__qis__nan__body()
+  %d__1 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double %x__10)
+  %38 = call i1 @__quantum__qis__isnan__body(double %d__1)
+  %39 = xor i1 %38, true
+  br i1 %39, label %then0__11, label %continue__11
+
+then0__11: ; preds = %continue__10
+  ret i64 12
+
+continue__11: ; preds = %continue__10
+  %y__11 = call double @__quantum__qis__nan__body()
+  %x__11 = call double @__quantum__qis__nan__body()
+  %d__2 = call double @__quantum__qis__arctan2__body(double %y__11, double %x__11)
+  %40 = call i1 @__quantum__qis__isnan__body(double %d__2)
+  %41 = xor i1 %40, true
+  br i1 %41, label %then0__12, label %continue__12
+
+then0__12: ; preds = %continue__11
+  ret i64 13
+
+continue__12: ; preds = %continue__11
+  ret i64 0
 }
 
-define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+declare double @__quantum__qis__arctan2__body(double, double)
+
+define double @Microsoft__Quantum__Math__PI__body() {
 entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }*
-  %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0
-  %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1
-  %3 = load %Array*, %Array** %1
-  %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2
-  call void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %3, { %Qubit*, i64 }* %4)
-  ret void
+  ret double 0x400921FB54442D18
 }
 
-declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i64)*]*, %Tuple*)
+declare double @__quantum__qis__nan__body()
 
-define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+declare i1 @__quantum__qis__isnan__body(double)
+
+define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() {
 entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
-  %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0
-  %2 = load %Qubit*, %Qubit** %1
-  %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }*
-  %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1
-  %5 = load i64, i64* %4
-  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64))
-  %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }*
-  %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0
-  %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1
-  store %Qubit* %2, %Qubit** %8
-  store i64 %5, i64* %9
-  %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0
-  %11 = load %Callable*, %Callable** %10
-  call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1)
-  ret void
+  %input = call double @Microsoft__Quantum__Math__E__body()
+  %0 = call double @__quantum__qis__log__body(double %input)
+  %1 = fcmp one double 1.000000e+00, %0
+  br i1 %1, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  ret i64 1
+
+continue__1: ; preds = %entry
+  %2 = call double @Microsoft__Quantum__Math__E__body()
+  %3 = call double @Microsoft__Quantum__Math__E__body()
+  %input__1 = fmul double %2, %3
+  %4 = call double @__quantum__qis__log__body(double %input__1)
+  %5 = fcmp one double 2.000000e+00, %4
+  br i1 %5, label %then0__2, label %continue__2
+
+then0__2: ; preds = %continue__1
+  ret i64 2
+
+continue__2: ; preds = %continue__1
+  %d = call double @__quantum__qis__log__body(double 0.000000e+00)
+  %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d)
+  %7 = xor i1 %6, true
+  br i1 %7, label %then0__3, label %continue__3
+
+then0__3: ; preds = %continue__2
+  ret i64 3
+
+continue__3: ; preds = %continue__2
+  %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00)
+  %8 = call i1 @__quantum__qis__isnan__body(double %d__1)
+  %9 = xor i1 %8, true
+  br i1 %9, label %then0__4, label %continue__4
+
+then0__4: ; preds = %continue__3
+  ret i64 4
+
+continue__4: ; preds = %continue__3
+  %input__4 = call double @__quantum__qis__nan__body()
+  %d__2 = call double @__quantum__qis__log__body(double %input__4)
+  %10 = call i1 @__quantum__qis__isnan__body(double %d__2)
+  %11 = xor i1 %10, true
+  br i1 %11, label %then0__5, label %continue__5
+
+then0__5: ; preds = %continue__4
+  ret i64 5
+
+continue__5: ; preds = %continue__4
+  %input__5 = call double @__quantum__qis__infinity__body()
+  %d__3 = call double @__quantum__qis__log__body(double %input__5)
+  %12 = call i1 @__quantum__qis__isinf__body(double %d__3)
+  %13 = xor i1 %12, true
+  br i1 %13, label %then0__6, label %continue__6
+
+then0__6: ; preds = %continue__5
+  ret i64 6
+
+continue__6: ; preds = %continue__5
+  ret i64 0
 }
 
-define void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+define double @Microsoft__Quantum__Math__E__body() {
 entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
-  %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0
-  %2 = load %Qubit*, %Qubit** %1
-  %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }*
-  %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1
-  %5 = load i64, i64* %4
-  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64))
-  %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }*
-  %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0
-  %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1
-  store %Qubit* %2, %Qubit** %8
-  store i64 %5, i64* %9
-  %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0
-  %11 = load %Callable*, %Callable** %10
-  %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 1)
-  call void @__quantum__rt__callable_make_adjoint(%Callable* %12)
-  call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i64 -1)
-  ret void
+  ret double 0x4005BF0A8B145769
 }
 
-define void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+declare double @__quantum__qis__log__body(double)
+
+declare i1 @__quantum__qis__isnegativeinfinity__body(double)
+
+declare double @__quantum__qis__infinity__body()
+
+declare i1 @__quantum__qis__isinf__body(double)
+
+define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() {
 entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
-  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
-  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
-  %3 = load %Array*, %Array** %1
-  %4 = load %Qubit*, %Qubit** %2
-  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }*
-  %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1
-  %7 = load i64, i64* %6
-  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64))
-  %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }*
-  %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0
-  %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1
-  store %Qubit* %4, %Qubit** %10
-  store i64 %7, i64* %11
-  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
-  %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }*
-  %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0
-  %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1
-  store %Array* %3, %Array** %14
-  store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15
-  %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0
-  %17 = load %Callable*, %Callable** %16
-  %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1)
-  call void @__quantum__rt__callable_make_controlled(%Callable* %18)
-  call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1)
-  ret void
-}
-
-define void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
-entry:
-  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
-  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
-  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
-  %3 = load %Array*, %Array** %1
-  %4 = load %Qubit*, %Qubit** %2
-  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }*
-  %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1
-  %7 = load i64, i64* %6
-  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64))
-  %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }*
-  %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0
-  %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1
-  store %Qubit* %4, %Qubit** %10
-  store i64 %7, i64* %11
-  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
-  %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }*
-  %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0
-  %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1
-  store %Array* %3, %Array** %14
-  store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15
-  %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0
-  %17 = load %Callable*, %Callable** %16
-  %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1)
-  call void @__quantum__rt__callable_make_adjoint(%Callable* %18)
-  call void @__quantum__rt__callable_make_controlled(%Callable* %18)
-  call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1)
-  ret void
-}
-
-define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) {
-entry:
-  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }*
-  %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0
-  %2 = load %Callable*, %Callable** %1
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change)
-  ret void
-}
-
-define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) {
-entry:
-  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }*
-  %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0
-  %2 = load %Callable*, %Callable** %1
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change)
-  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change)
-  ret void
-}
-
-declare void @__quantum__rt__callable_memory_management(i32, %Callable*, i64)
-
-declare void @__quantum__rt__callable_update_alias_count(%Callable*, i64)
-
-declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1)
-
-declare void @__quantum__rt__callable_make_adjoint(%Callable*)
-
-declare void @__quantum__rt__callable_make_controlled(%Callable*)
-
-declare %Qubit* @__quantum__rt__qubit_allocate()
-
-declare %Array* @__quantum__rt__qubit_allocate_array(i64)
-
-declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*)
-
-define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) {
-entry:
-  %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
-  %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0)
-  %1 = bitcast i8* %0 to i2*
-  %2 = load i2, i2* @PauliZ
-  store i2 %2, i2* %1
-  call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1)
-  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
-  %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
-  %4 = bitcast i8* %3 to %Qubit**
-  store %Qubit* %qubit, %Qubit** %4
-  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1)
-  %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits)
-  call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1)
-  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %bases, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i64 -1)
-  ret %Result* %5
-}
-
-declare i1 @__quantum__rt__result_equal(%Result*, %Result*)
-
-declare %String* @__quantum__rt__string_create(i32, i8*)
-
-declare void @__quantum__rt__qubit_release(%Qubit*)
+  %0 = call double @__quantum__qis__sqrt__body(double 4.000000e+00)
+  %1 = fcmp one double 2.000000e+00, %0
+  br i1 %1, label %then0__1, label %continue__1
 
-declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64)
+then0__1: ; preds = %entry
+  ret i64 1
 
-declare void @__quantum__rt__result_update_reference_count(%Result*, i64)
+continue__1: ; preds = %entry
+  %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00)
+  %3 = fcmp one double 3.000000e+00, %2
+  br i1 %3, label %then0__2, label %continue__2
 
-declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64)
+then0__2: ; preds = %continue__1
+  ret i64 2
 
-declare void @__quantum__rt__fail(%String*)
+continue__2: ; preds = %continue__1
+  %4 = call double @__quantum__qis__sqrt__body(double 1.000000e+02)
+  %5 = fcmp one double 1.000000e+01, %4
+  br i1 %5, label %then0__3, label %continue__3
 
-declare %Array* @__quantum__rt__array_create_1d(i32, i64)
+then0__3: ; preds = %continue__2
+  ret i64 3
 
-declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64)
+continue__3: ; preds = %continue__2
+  %d__4 = call double @__quantum__qis__sqrt__body(double -5.000000e+00)
+  %6 = call i1 @__quantum__qis__isnan__body(double %d__4)
+  %7 = xor i1 %6, true
+  br i1 %7, label %then0__4, label %continue__4
 
-declare void @__quantum__rt__array_update_reference_count(%Array*, i64)
+then0__4: ; preds = %continue__3
+  ret i64 4
 
-define void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() {
-entry:
-  %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2)
-  call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1)
-  %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1)
-  %1 = bitcast i8* %0 to %Qubit**
-  %qubit = load %Qubit*, %Qubit** %1
-  call void @__quantum__qis__x__body(%Qubit* %qubit)
-  %q = call %Qubit* @__quantum__rt__qubit_allocate()
-  %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1)
-  %3 = bitcast i8* %2 to %Qubit**
-  %4 = load %Qubit*, %Qubit** %3
-  %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4)
-  %6 = load %Result*, %Result** @ResultOne
-  %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6)
-  br i1 %7, label %then0__1, label %continue__1
+continue__4: ; preds = %continue__3
+  %d__5 = call double @__quantum__qis__nan__body()
+  %d__6 = call double @__quantum__qis__sqrt__body(double %d__5)
+  %8 = call i1 @__quantum__qis__isnan__body(double %d__6)
+  %9 = xor i1 %8, true
+  br i1 %9, label %then0__5, label %continue__5
 
-then0__1: ; preds = %entry
-  call void @__quantum__qis__x__body(%Qubit* %q)
-  br label %continue__1
+then0__5: ; preds = %continue__4
+  ret i64 5
 
-continue__1: ; preds = %then0__1, %entry
-  %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0)
-  %9 = bitcast i8* %8 to %Qubit**
-  %10 = load %Qubit*, %Qubit** %9
-  %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10)
-  %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q)
-  %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12)
-  br i1 %13, label %then0__2, label %continue__2
+continue__5: ; preds = %continue__4
+  %d__7 = call double @__quantum__qis__infinity__body()
+  %d__8 = call double @__quantum__qis__sqrt__body(double %d__7)
+  %10 = call i1 @__quantum__qis__isinf__body(double %d__8)
+  %11 = xor i1 %10, true
+  br i1 %11, label %then0__6, label %continue__6
 
-then0__2: ; preds = %continue__1
-  %14 = call %String* @__quantum__rt__string_create(i32 29, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @7, i32 0, i32 0))
-  call void @__quantum__rt__qubit_release(%Qubit* %q)
-  call void @__quantum__rt__qubit_release_array(%Array* %qs)
-  call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1)
-  call void @__quantum__rt__fail(%String* %14)
-  unreachable
+then0__6: ; preds = %continue__5
+  ret i64 6
 
-continue__2: ; preds = %continue__1
-  call void @__quantum__rt__qubit_release(%Qubit* %q)
-  call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1)
-  call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1)
-  call void @__quantum__rt__qubit_release_array(%Array* %qs)
-  call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1)
-  call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1)
-  ret void
+continue__6: ; preds = %continue__5
+  ret i64 0
 }
 
-declare void @__quantum__rt__array_update_alias_count(%Array*, i64)
-
-declare void @__quantum__qis__x__body(%Qubit*)
-
-declare void @__quantum__rt__qubit_release_array(%Array*)
-
-define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) {
-entry:
-  %0 = sub i64 %from, %what
-  ret i64 %0
-}
+declare double @__quantum__qis__sqrt__body(double)
 
 define i64 @Microsoft__Quantum__Testing__QIR__Test_Arrays__body(%Array* %array, i64 %index, i64 %val, i1 %compilerDecoy) {
 entry:
@@ -974,289 +392,646 @@ continue__1: ; preds = %then0__1, %exit__1
   ret i64 %22
 }
 
+declare void @__quantum__rt__array_update_alias_count(%Array*, i64)
+
+declare void @__quantum__rt__array_update_reference_count(%Array*, i64)
+
 declare %Array* @__quantum__rt__array_copy(%Array*, i1)
 
+declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64)
+
 declare i64 @__quantum__rt__array_get_size_1d(%Array*)
 
 declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1)
 
 declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*)
 
-define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) {
+define void @Microsoft__Quantum__Testing__QIR__TestControlled__body() {
 entry:
   %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64))
   %1 = bitcast %Tuple* %0 to { %Callable*, i64 }*
   %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0
   %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1
-  %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null)
+  %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Qop, [2 x void (%Tuple*, i64)*]* null, %Tuple* null)
   store %Callable* %4, %Callable** %2
-  store i64 %x, i64* %3
-  %subtractor = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %0)
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1)
-  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64))
-  %6 = bitcast %Tuple* %5 to { i64 }*
-  %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0
-  store i64 %y, i64* %7
-  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64))
-  call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8)
-  %9 = bitcast %Tuple* %8 to { i64 }*
-  %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0
-  %11 = load i64, i64* %10
-  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1)
-  call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1)
-  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %subtractor, i64 -1)
-  call void @__quantum__rt__callable_update_reference_count(%Callable* %subtractor, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
-  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1)
-  ret i64 %11
-}
-
-define i64 @Microsoft__Quantum__Testing__QIR__Math__SqrtTest__body() {
-entry:
-  %0 = call double @__quantum__qis__sqrt__body(double 4.000000e+00)
-  %1 = fcmp one double 2.000000e+00, %0
-  br i1 %1, label %then0__1, label %continue__1
+  store i64 1, i64* %3
+  %qop = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i64)*]* @MemoryManagement__2, %Tuple* %0)
+  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 1)
+  %adj_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false)
+  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_qop)
+  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 1)
+  %ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false)
+  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_qop)
+  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 1)
+  %adj_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false)
+  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %adj_ctl_qop)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %adj_ctl_qop)
+  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 1)
+  %ctl_ctl_qop = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_qop, i1 false)
+  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %ctl_ctl_qop)
+  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 1)
+  %q1 = call %Qubit* @__quantum__rt__qubit_allocate()
+  %q2 = call %Qubit* @__quantum__rt__qubit_allocate()
+  %q3 = call %Qubit* @__quantum__rt__qubit_allocate()
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { %Qubit* }*
+  %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0
+  store %Qubit* %q1, %Qubit** %7
+  call void @__quantum__rt__callable_invoke(%Callable* %qop, %Tuple* %5, %Tuple* null)
+  %8 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1)
+  %9 = load %Result*, %Result** @ResultOne
+  %10 = call i1 @__quantum__rt__result_equal(%Result* %8, %Result* %9)
+  %11 = xor i1 %10, true
+  br i1 %11, label %then0__1, label %continue__1
 
 then0__1: ; preds = %entry
-  ret i64 1
+-  ret i64 1
++  %12 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__fail(%String* %12)
++  unreachable
 
 continue__1: ; preds = %entry
-  %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00)
-  %3 = fcmp one double 3.000000e+00, %2
-  br i1 %3, label %then0__2, label %continue__2
+-  %2 = call double @__quantum__qis__sqrt__body(double 9.000000e+00)
+-  %3 = fcmp one double 3.000000e+00, %2
+-  br i1 %3, label %then0__2, label %continue__2
++  %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64))
++  %14 = bitcast %Tuple* %13 to { %Qubit* }*
++  %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0
++  store %Qubit* %q2, %Qubit** %15
++  call void @__quantum__rt__callable_invoke(%Callable* %adj_qop, %Tuple* %13, %Tuple* null)
++  %16 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2)
++  %17 = load %Result*, %Result** @ResultOne
++  %18 = call i1 @__quantum__rt__result_equal(%Result* %16, %Result* %17)
++  %19 = xor i1 %18, true
++  br i1 %19, label %then0__2, label %continue__2
 
 then0__2: ; preds = %continue__1
-  ret i64 2
+-  ret i64 2
++  %20 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__fail(%String* %20)
++  unreachable
 
 continue__2: ; preds = %continue__1
-  %4 = call double @__quantum__qis__sqrt__body(double 1.000000e+02)
-  %5 = fcmp one double 1.000000e+01, %4
-  br i1 %5, label %then0__3, label %continue__3
++  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %22 = bitcast %Tuple* %21 to { %Array*, %Qubit* }*
++  %23 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 0
++  %24 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %22, i32 0, i32 1
++  %25 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0)
++  %27 = bitcast i8* %26 to %Qubit**
++  store %Qubit* %q1, %Qubit** %27
++  store %Array* %25, %Array** %23
++  store %Qubit* %q3, %Qubit** %24
++  call void @__quantum__rt__callable_invoke(%Callable* %ctl_qop, %Tuple* %21, %Tuple* null)
++  %28 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3)
++  %29 = load %Result*, %Result** @ResultOne
++  %30 = call i1 @__quantum__rt__result_equal(%Result* %28, %Result* %29)
++  %31 = xor i1 %30, true
++  br i1 %31, label %then0__3, label %continue__3
 
 then0__3: ; preds = %continue__2
-  ret i64 3
+-  ret i64 3
++  %32 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @3, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__fail(%String* %32)
++  unreachable
 
 continue__3: ; preds = %continue__2
-  %d__4 = call double @__quantum__qis__sqrt__body(double -5.000000e+00)
-  %6 = call i1 @__quantum__qis__isnan__body(double %d__4)
-  %7 = xor i1 %6, true
-  br i1 %7, label %then0__4, label %continue__4
++  %33 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %34 = bitcast %Tuple* %33 to { %Array*, %Qubit* }*
++  %35 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 0
++  %36 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %34, i32 0, i32 1
++  %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0)
++  %39 = bitcast i8* %38 to %Qubit**
++  store %Qubit* %q2, %Qubit** %39
++  store %Array* %37, %Array** %35
++  store %Qubit* %q3, %Qubit** %36
++  call void @__quantum__rt__callable_invoke(%Callable* %adj_ctl_qop, %Tuple* %33, %Tuple* null)
++  %40 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3)
++  %41 = load %Result*, %Result** @ResultZero
++  %42 = call i1 @__quantum__rt__result_equal(%Result* %40, %Result* %41)
++  %43 = xor i1 %42, true
++  br i1 %43, label %then0__4, label %continue__4
 
 then0__4: ; preds = %continue__3
-  ret i64 4
+-  ret i64 4
++  %44 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @4, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__fail(%String* %44)
++  unreachable
 
 continue__4: ; preds = %continue__3
-  %d__5 = call double @__quantum__qis__nan__body()
-  %d__6 = call double @__quantum__qis__sqrt__body(double %d__5)
-  %8 = call i1 @__quantum__qis__isnan__body(double %d__6)
-  %9 = xor i1 %8, true
-  br i1 %9, label %then0__5, label %continue__5
-
-then0__5: ; preds = %continue__4
-  ret i64 5
-
-continue__5: ; preds = %continue__4
-  %d__7 = call double @__quantum__qis__infinity__body()
-  %d__8 = call double @__quantum__qis__sqrt__body(double %d__7)
-  %10 = call i1 @__quantum__qis__isinf__body(double %d__8)
-  %11 = xor i1 %10, true
-  br i1 %11, label %then0__6, label %continue__6
-
-then0__6: ; preds = %continue__5
-  ret i64 6
-
-continue__6: ; preds = %continue__5
-  ret i64 0
-}
-
-define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() {
-entry:
-  %input = call double @Microsoft__Quantum__Math__E__body()
-  %0 = call double @__quantum__qis__log__body(double %input)
-  %1 = fcmp one double 1.000000e+00, %0
-  br i1 %1, label %then0__1, label %continue__1
-
-then0__1: ; preds = %entry
-  ret i64 1
-
-continue__1: ; preds = %entry
-  %2 = call double @Microsoft__Quantum__Math__E__body()
-  %3 = call double @Microsoft__Quantum__Math__E__body()
-  %input__1 = fmul double %2, %3
-  %4 = call double @__quantum__qis__log__body(double %input__1)
-  %5 = fcmp one double 2.000000e+00, %4
-  br i1 %5, label %then0__2, label %continue__2
-
-then0__2: ; preds = %continue__1
-  ret i64 2
-
-continue__2: ; preds = %continue__1
-  %d = call double @__quantum__qis__log__body(double 0.000000e+00)
-  %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d)
-  %7 = xor i1 %6, true
-  br i1 %7, label %then0__3, label %continue__3
-
-then0__3: ; preds = %continue__2
-  ret i64 3
-
-continue__3: ; preds = %continue__2
-  %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00)
-  %8 = call i1 @__quantum__qis__isnan__body(double %d__1)
-  %9 = xor i1 %8, true
-  br i1 %9, label %then0__4, label %continue__4
-
-then0__4: ; preds = %continue__3
-  ret i64 4
-
-continue__4: ; preds = %continue__3
-  %input__4 = call double @__quantum__qis__nan__body()
-  %d__2 = call double @__quantum__qis__log__body(double %input__4)
-  %10 = call i1 @__quantum__qis__isnan__body(double %d__2)
-  %11 = xor i1 %10, true
-  br i1 %11, label %then0__5, label %continue__5
-
-then0__5: ; preds = %continue__4
-  ret i64 5
-
-continue__5: ; preds = %continue__4
-  %input__5 = call double @__quantum__qis__infinity__body()
-  %d__3 = call double @__quantum__qis__log__body(double %input__5)
-  %12 = call i1 @__quantum__qis__isinf__body(double %d__3)
-  %13 = xor i1 %12, true
-  br i1 %13, label %then0__6, label %continue__6
-
-then0__6: ; preds = %continue__5
-  ret i64 6
-
-continue__6: ; preds = %continue__5
-  ret i64 0
-}
-
-define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() {
-entry:
-  %0 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00)
-  %1 = fcmp one double 0.000000e+00, %0
-  br i1 %1, label %then0__1, label %continue__1
-
-then0__1: ; preds = %entry
-  ret i64 1
-
-continue__1: ; preds = %entry
-  %2 = call double @Microsoft__Quantum__Math__PI__body()
-  %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00)
-  %4 = fcmp one double %2, %3
-  br i1 %4, label %then0__2, label %continue__2
-
-then0__2: ; preds = %continue__1
-  ret i64 2
-
-continue__2: ; preds = %continue__1
-  %5 = call double @Microsoft__Quantum__Math__PI__body()
-  %6 = fdiv double %5, 2.000000e+00
-  %7 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00)
-  %8 = fcmp one double %6, %7
-  br i1 %8, label %then0__3, label %continue__3
-
-then0__3: ; preds = %continue__2
-  ret i64 3
-
-continue__3: ; preds = %continue__2
-  %9 = call double @Microsoft__Quantum__Math__PI__body()
-  %10 = fneg double %9
-  %11 = fdiv double %10, 2.000000e+00
-  %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00)
-  %13 = fcmp one double %11, %12
-  br i1 %13, label %then0__4, label %continue__4
-
-then0__4: ; preds = %continue__3
-  ret i64 4
-
-continue__4: ; preds = %continue__3
-  %14 = call double @Microsoft__Quantum__Math__PI__body()
-  %15 = fdiv double %14, 4.000000e+00
-  %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00)
-  %17 = fcmp one double %15, %16
-  br i1 %17, label %then0__5, label %continue__5
+-  %d__5 = call double @__quantum__qis__nan__body()
+-  %d__6 = call double @__quantum__qis__sqrt__body(double %d__5)
+-  %8 = call i1 @__quantum__qis__isnan__body(double %d__6)
+-  %9 = xor i1 %8, true
+-  br i1 %9, label %then0__5, label %continue__5
+-
+-then0__5: ; preds = %continue__4
+-  ret i64 5
+-
+-continue__5: ; preds = %continue__4
+-  %d__7 = call double @__quantum__qis__infinity__body()
+-  %d__8 = call double @__quantum__qis__sqrt__body(double %d__7)
+-  %10 = call i1 @__quantum__qis__isinf__body(double %d__8)
+-  %11 = xor i1 %10, true
+-  br i1 %11, label %then0__6, label %continue__6
+-
+-then0__6: ; preds = %continue__5
+-  ret i64 6
+-
+-continue__6: ; preds = %continue__5
+-  ret i64 0
+-}
+-
+-define i64 @Microsoft__Quantum__Testing__QIR__Math__LogTest__body() {
+-entry:
+-  %input = call double @Microsoft__Quantum__Math__E__body()
+-  %0 = call double @__quantum__qis__log__body(double %input)
+-  %1 = fcmp one double 1.000000e+00, %0
+-  br i1 %1, label %then0__1, label %continue__1
+-
+-then0__1: ; preds = %entry
+-  ret i64 1
+-
+-continue__1: ; preds = %entry
+-  %2 = call double @Microsoft__Quantum__Math__E__body()
+-  %3 = call double @Microsoft__Quantum__Math__E__body()
+-  %input__1 = fmul double %2, %3
+-  %4 = call double @__quantum__qis__log__body(double %input__1)
+-  %5 = fcmp one double 2.000000e+00, %4
+-  br i1 %5, label %then0__2, label %continue__2
+-
+-then0__2: ; preds = %continue__1
+-  ret i64 2
+-
+-continue__2: ; preds = %continue__1
+-  %d = call double @__quantum__qis__log__body(double 0.000000e+00)
+-  %6 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d)
+-  %7 = xor i1 %6, true
+-  br i1 %7, label %then0__3, label %continue__3
+-
+-then0__3: ; preds = %continue__2
+-  ret i64 3
+-
+-continue__3: ; preds = %continue__2
+-  %d__1 = call double @__quantum__qis__log__body(double -5.000000e+00)
+-  %8 = call i1 @__quantum__qis__isnan__body(double %d__1)
+-  %9 = xor i1 %8, true
+-  br i1 %9, label %then0__4, label %continue__4
+-
+-then0__4: ; preds = %continue__3
+-  ret i64 4
+-
+-continue__4: ; preds = %continue__3
+-  %input__4 = call double @__quantum__qis__nan__body()
+-  %d__2 = call double @__quantum__qis__log__body(double %input__4)
+-  %10 = call i1 @__quantum__qis__isnan__body(double %d__2)
+-  %11 = xor i1 %10, true
+-  br i1 %11, label %then0__5, label %continue__5
+-
+-then0__5: ; preds = %continue__4
+-  ret i64 5
+-
+-continue__5: ; preds = %continue__4
+-  %input__5 = call double @__quantum__qis__infinity__body()
+-  %d__3 = call double @__quantum__qis__log__body(double %input__5)
+-  %12 = call i1 @__quantum__qis__isinf__body(double %d__3)
+-  %13 = xor i1 %12, true
+-  br i1 %13, label %then0__6, label %continue__6
+-
+-then0__6: ; preds = %continue__5
+-  ret i64 6
+-
+-continue__6: ; preds = %continue__5
+-  ret i64 0
+-}
+-
+-define i64 @Microsoft__Quantum__Testing__QIR__Math__ArcTan2Test__body() {
+-entry:
+-  %0 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 1.000000e+00)
+-  %1 = fcmp one double 0.000000e+00, %0
+-  br i1 %1, label %then0__1, label %continue__1
+-
+-then0__1: ; preds = %entry
+-  ret i64 1
+-
+-continue__1: ; preds = %entry
+-  %2 = call double @Microsoft__Quantum__Math__PI__body()
+-  %3 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double -1.000000e+00)
+-  %4 = fcmp one double %2, %3
+-  br i1 %4, label %then0__2, label %continue__2
+-
+-then0__2: ; preds = %continue__1
+-  ret i64 2
+-
+-continue__2: ; preds = %continue__1
+-  %5 = call double @Microsoft__Quantum__Math__PI__body()
+-  %6 = fdiv double %5, 2.000000e+00
+-  %7 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 0.000000e+00)
+-  %8 = fcmp one double %6, %7
+-  br i1 %8, label %then0__3, label %continue__3
+-
+-then0__3: ; preds = %continue__2
+-  ret i64 3
+-
+-continue__3: ; preds = %continue__2
+-  %9 = call double @Microsoft__Quantum__Math__PI__body()
+-  %10 = fneg double %9
+-  %11 = fdiv double %10, 2.000000e+00
+-  %12 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 0.000000e+00)
+-  %13 = fcmp one double %11, %12
+-  br i1 %13, label %then0__4, label %continue__4
+-
+-then0__4: ; preds = %continue__3
+-  ret i64 4
+-
+-continue__4: ; preds = %continue__3
+-  %14 = call double @Microsoft__Quantum__Math__PI__body()
+-  %15 = fdiv double %14, 4.000000e+00
+-  %16 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double 1.000000e+00)
+-  %17 = fcmp one double %15, %16
+-  br i1 %17, label %then0__5, label %continue__5
++  %45 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %46 = bitcast %Tuple* %45 to { %Array*, { %Array*, %Qubit* }* }*
++  %47 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 0
++  %48 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %46, i32 0, i32 1
++  %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0)
++  %51 = bitcast i8* %50 to %Qubit**
++  store %Qubit* %q1, %Qubit** %51
++  %52 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %53 = bitcast %Tuple* %52 to { %Array*, %Qubit* }*
++  %54 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 0
++  %55 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %53, i32 0, i32 1
++  %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0)
++  %58 = bitcast i8* %57 to %Qubit**
++  store %Qubit* %q2, %Qubit** %58
++  store %Array* %56, %Array** %54
++  store %Qubit* %q3, %Qubit** %55
++  store %Array* %49, %Array** %47
++  store { %Array*, %Qubit* }* %53, { %Array*, %Qubit* }** %48
++  call void @__quantum__rt__callable_invoke(%Callable* %ctl_ctl_qop, %Tuple* %45, %Tuple* null)
++  %59 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3)
++  %60 = load %Result*, %Result** @ResultOne
++  %61 = call i1 @__quantum__rt__result_equal(%Result* %59, %Result* %60)
++  %62 = xor i1 %61, true
++  br i1 %62, label %then0__5, label %continue__5
 
 then0__5: ; preds = %continue__4
-  ret i64 5
++  %63 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @5, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__fail(%String* %63)
++  unreachable
 
 continue__5: ; preds = %continue__4
-  %18 = call double @Microsoft__Quantum__Math__PI__body()
-  %19 = fmul double %18, 3.000000e+00
-  %20 = fdiv double %19, 4.000000e+00
-  %21 = call double @__quantum__qis__arctan2__body(double 1.000000e+00, double -1.000000e+00)
-  %22 = fcmp one double %20, %21
-  br i1 %22, label %then0__6, label %continue__6
++  %64 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 1)
++  call void @__quantum__rt__callable_make_controlled(%Callable* %64)
++  %65 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %66 = bitcast %Tuple* %65 to { %Array*, %Qubit* }*
++  %67 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 0
++  %68 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %66, i32 0, i32 1
++  %69 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2)
++  %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0)
++  %71 = bitcast i8* %70 to %Qubit**
++  %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1)
++  %73 = bitcast i8* %72 to %Qubit**
++  store %Qubit* %q1, %Qubit** %71
++  store %Qubit* %q2, %Qubit** %73
++  store %Array* %69, %Array** %67
++  store %Qubit* %q3, %Qubit** %68
++  call void @__quantum__rt__callable_invoke(%Callable* %64, %Tuple* %65, %Tuple* null)
++  %74 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3)
++  %75 = load %Result*, %Result** @ResultZero
++  %76 = call i1 @__quantum__rt__result_equal(%Result* %74, %Result* %75)
++  %77 = xor i1 %76, true
++  br i1 %77, label %then0__6, label %continue__6
 
 then0__6: ; preds = %continue__5
-  ret i64 6
++  %78 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @6, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__fail(%String* %78)
++  unreachable
 
 continue__6: ; preds = %continue__5
-  %23 = call double @Microsoft__Quantum__Math__PI__body()
-  %24 = fneg double %23
-  %25 = fmul double %24, 3.000000e+00
-  %26 = fdiv double %25, 4.000000e+00
-  %27 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double -1.000000e+00)
-  %28 = fcmp one double %26, %27
-  br i1 %28, label %then0__7, label %continue__7
++  %q4 = call %Qubit* @__quantum__rt__qubit_allocate()
++  %79 = call %Callable* @__quantum__rt__callable_copy(%Callable* %qop, i1 false)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 1)
++  call void @__quantum__rt__callable_make_adjoint(%Callable* %79)
++  %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64))
++  %81 = bitcast %Tuple* %80 to { %Qubit* }*
++  %82 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %81, i32 0, i32 0
++  store %Qubit* %q3, %Qubit** %82
++  call void @__quantum__rt__callable_invoke(%Callable* %79, %Tuple* %80, %Tuple* null)
++  %83 = call %Callable* @__quantum__rt__callable_copy(%Callable* %ctl_ctl_qop, i1 false)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 1)
++  call void @__quantum__rt__callable_make_controlled(%Callable* %83)
++  call void @__quantum__rt__callable_make_adjoint(%Callable* %83)
++  %84 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %85 = bitcast %Tuple* %84 to { %Array*, { %Array*, { %Array*, %Qubit* }* }* }*
++  %86 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 0
++  %87 = getelementptr inbounds { %Array*, { %Array*, { %Array*, %Qubit* }* }* }, { %Array*, { %Array*, { %Array*, %Qubit* }* }* }* %85, i32 0, i32 1
++  %88 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0)
++  %90 = bitcast i8* %89 to %Qubit**
++  store %Qubit* %q1, %Qubit** %90
++  %91 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %92 = bitcast %Tuple* %91 to { %Array*, { %Array*, %Qubit* }* }*
++  %93 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 0
++  %94 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %92, i32 0, i32 1
++  %95 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0)
++  %97 = bitcast i8* %96 to %Qubit**
++  store %Qubit* %q2, %Qubit** %97
++  %98 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2))
++  %99 = bitcast %Tuple* %98 to { %Array*, %Qubit* }*
++  %100 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 0
++  %101 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %99, i32 0, i32 1
++  %102 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
++  %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 0)
++  %104 = bitcast i8* %103 to %Qubit**
++  store %Qubit* %q3, %Qubit** %104
++  store %Array* %102, %Array** %100
++  store %Qubit* %q4, %Qubit** %101
++  store %Array* %95, %Array** %93
++  store { %Array*, %Qubit* }* %99, { %Array*, %Qubit* }** %94
++  store %Array* %88, %Array** %86
++  store { %Array*, { %Array*, %Qubit* }* }* %92, { %Array*, { %Array*, %Qubit* }* }** %87
++  call void @__quantum__rt__callable_invoke(%Callable* %83, %Tuple* %84, %Tuple* null)
++  %105 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q4)
++  %106 = load %Result*, %Result** @ResultOne
++  %107 = call i1 @__quantum__rt__result_equal(%Result* %105, %Result* %106)
++  %108 = xor i1 %107, true
++  br i1 %108, label %then0__7, label %continue__7
 
 then0__7: ; preds = %continue__6
-  ret i64 7
++  %109 = call %String* @__quantum__rt__string_create(i32 13, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @7, i32 0, i32 0))
++  call void @__quantum__rt__qubit_release(%Qubit* %q4)
++  call void @__quantum__rt__qubit_release(%Qubit* %q1)
++  call void @__quantum__rt__qubit_release(%Qubit* %q2)
++  call void @__quantum__rt__qubit_release(%Qubit* %q3)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1)
++  call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1)
++  call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1)
++  call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1)
++  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1)
++  call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1)
++  call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__fail(%String* %109) + unreachable continue__7: ; preds = %continue__6 - %29 = call double @Microsoft__Quantum__Math__PI__body() - %30 = fneg double %29 - %31 = fdiv double %30, 4.000000e+00 - %32 = call double @__quantum__qis__arctan2__body(double -1.000000e+00, double 1.000000e+00) - %33 = fcmp one double %31, %32 - br i1 %33, label %then0__8, label %continue__8 - -then0__8: ; preds = %continue__7 - ret i64 8 - -continue__8: ; preds = %continue__7 - %34 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double 0.000000e+00) - %35 = fcmp one double 0.000000e+00, %34 - br i1 %35, label %then0__9, label %continue__9 - -then0__9: ; preds = %continue__8 - ret i64 9 - -continue__9: ; preds = %continue__8 - %y__9 = call double @__quantum__qis__nan__body() - %d = call double @__quantum__qis__arctan2__body(double %y__9, double 0.000000e+00) - %36 = call i1 @__quantum__qis__isnan__body(double %d) - %37 = xor i1 %36, true - br i1 %37, label %then0__10, label %continue__10 + call void @__quantum__rt__qubit_release(%Qubit* %q4) 
+ call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %79, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %79, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %83, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %83, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %88, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %95, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %102, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %105, i64 -1) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %16, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %28, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %40, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %64, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %64, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %65, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %ctl_ctl_qop, i64 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %adj_ctl_qop, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %ctl_ctl_qop, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %ctl_ctl_qop, i64 -1) + ret void +} -then0__10: ; preds = %continue__9 - ret i64 11 +define i64 @Microsoft__Quantum__Testing__QIR__TestPartials__body(i64 %x, i64 %y) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable*, i64 }* + %2 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %1, i32 0, i32 1 + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Testing__QIR__Subtract, [2 x void (%Tuple*, i64)*]* null, %Tuple* null) + store %Callable* %4, %Callable** %2 + store i64 %x, i64* %3 + %subtractor = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i64)*]* @MemoryManagement__1, %Tuple* %0) + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %y, i64* %7 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %subtractor, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i64 }* + %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0 + %11 = load i64, i64* %10 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %subtractor, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %subtractor, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + ret i64 %11 +} -continue__10: ; preds = %continue__9 - %x__10 = call double @__quantum__qis__nan__body() - %d__1 = call double @__quantum__qis__arctan2__body(double 0.000000e+00, double %x__10) - %38 = call i1 @__quantum__qis__isnan__body(double %d__1) - %39 = xor i1 %38, true - br i1 %39, label 
%then0__11, label %continue__11 +define void @Microsoft__Quantum__Testing__QIR__TestQubitResultManagement__body() { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__x__body(%Qubit* %qubit) + %q = call %Qubit* @__quantum__rt__qubit_allocate() + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3 + %5 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %4) + %6 = load %Result*, %Result** @ResultOne + %7 = call i1 @__quantum__rt__result_equal(%Result* %5, %Result* %6) + br i1 %7, label %then0__1, label %continue__1 -then0__11: ; preds = %continue__10 - ret i64 12 +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %q) + br label %continue__1 -continue__11: ; preds = %continue__10 - %y__11 = call double @__quantum__qis__nan__body() - %x__11 = call double @__quantum__qis__nan__body() - %d__2 = call double @__quantum__qis__arctan2__body(double %y__11, double %x__11) - %40 = call i1 @__quantum__qis__isnan__body(double %d__2) - %41 = xor i1 %40, true - br i1 %41, label %then0__12, label %continue__12 +continue__1: ; preds = %then0__1, %entry + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %10) + %12 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q) + %13 = call i1 @__quantum__rt__result_equal(%Result* %11, %Result* %12) + br i1 %13, label %then0__2, label %continue__2 -then0__12: ; preds = %continue__11 - ret i64 13 +then0__2: ; preds = %continue__1 + %14 = call %String* @__quantum__rt__string_create(i32 29, i8* getelementptr inbounds ([30 x i8], [30 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__qubit_release(%Qubit* %q) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable -continue__12: ; preds = %continue__11 - ret i64 0 +continue__2: ; preds = %continue__1 + call void @__quantum__rt__qubit_release(%Qubit* %q) + call void @__quantum__rt__result_update_reference_count(%Result* %5, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %12, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void } define i64 @Microsoft__Quantum__Testing__QIR__Str__PauliToStringTest__body() { @@ -1340,6 +1115,51 @@ continue__4: ; preds = %continue__3 ret i64 0 } +define i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %from, i64 %what) { +entry: + %0 = sub i64 %from, %what + ret i64 %0 +} + +declare 
%Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__qis__x__body(%Qubit*) + +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + %2 = load i2, i2* @PauliZ + store i2 %2, i2* %1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %qubit, %Qubit** %4 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i64 -1) + ret %Result* %5 +} + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare %String* @__quantum__rt__string_create(i32, i8*) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) + +declare void @__quantum__rt__fail(%String*) + define void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %q, i64 %n) { entry: %0 = srem i64 %n, 2 @@ -1405,38 +1225,236 @@ entry: ret void } -define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i64) + +define void @Microsoft__Quantum__Testing__QIR__Subtract__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* + %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 + %3 = load i64, i64* %1 + %4 = load i64, i64* %2 + %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %5, i64* %7 + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i64)*]*, %Tuple*) + +define void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 + %2 = load i64, i64* %1 + %3 = bitcast %Tuple* %arg-tuple to { i64 }* + %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 + %5 = load i64, i64* %4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) + %7 = bitcast %Tuple* %6 to { i64, i64 }* + %8 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { 
i64, i64 }, { i64, i64 }* %7, i32 0, i32 1 + store i64 %2, i64* %8 + store i64 %5, i64* %9 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) + ret void +} + +define void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +define void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i64 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1 + call void @__quantum__rt__callable_memory_management(i32 1, %Callable* %2, i64 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i64 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i64 %count-change) + ret void +} + +declare void @__quantum__rt__callable_memory_management(i32, %Callable*, i64) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i64) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i64) + +define void @Microsoft__Quantum__Testing__QIR__Qop__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1 + %4 = load i64, i64* %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__body(%Qubit* %3, i64 %4) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, i64 }* + %1 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1 + %4 = load i64, i64* %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__adj(%Qubit* %3, i64 %4) + ret void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__ctl(%Array* %3, { %Qubit*, i64 }* %4) + ret 
void +} + +define void @Microsoft__Quantum__Testing__QIR__Qop__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, i64 }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load { %Qubit*, i64 }*, { %Qubit*, i64 }** %2 + call void @Microsoft__Quantum__Testing__QIR__Qop__ctladj(%Array* %3, { %Qubit*, i64 }* %4) + ret void +} + +define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* - %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 - %3 = load i64, i64* %1 - %4 = load i64, i64* %2 - %5 = call i64 @Microsoft__Quantum__Testing__QIR__Subtract__body(i64 %3, i64 %4) - %6 = bitcast %Tuple* %result-tuple to { i64 }* - %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 - store i64 %5, i64* %7 + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 + %5 = load i64, i64* %4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* + %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8 + store i64 %5, i64* %9 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) ret void } -define void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +define void @Lifted__PartialApplication__2__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* - %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 - %2 = load i64, i64* %1 - %3 = bitcast %Tuple* %arg-tuple to { i64 }* - %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %4 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 1 %5 = load i64, i64* %4 - %6 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i64* getelementptr (i64, i64* null, i32 1) to i64), i64 2)) - %7 = bitcast %Tuple* %6 to { i64, i64 }* - %8 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 0 - %9 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %7, i32 0, i32 1 - store i64 %2, i64* %8 + %6 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, i64 }* + %8 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8 store i64 %5, i64* %9 - %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %3, i32 0, i32 0 %11 = load %Callable*, %Callable** %10 - call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %12, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i64 -1) + ret void +} + +define void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 + %7 = load i64, i64* %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* + %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 + store %Qubit* %4, %Qubit** %10 + store i64 %7, i64* %11 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14 + store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 + %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, 
%Callable* %18, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) + ret void +} + +define void @Lifted__PartialApplication__2__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1 + %4 = load %Qubit*, %Qubit** %2 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %6 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 1 + %7 = load i64, i64* %6 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, i64 }* getelementptr ({ %Qubit*, i64 }, { %Qubit*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, i64 }* + %10 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, i64 }, { %Qubit*, i64 }* %9, i32 0, i32 1 + store %Qubit* %4, %Qubit** %10 + store i64 %7, i64* %11 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, i64 }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, i64 }* }, { %Array*, { %Qubit*, i64 }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14 + store { %Qubit*, i64 }* %9, { %Qubit*, i64 }** %15 + %16 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i64 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i64 -1) + call void @__quantum__rt__callable_memory_management(i32 0, %Callable* %18, i64 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i64 -1) ret void } @@ -1462,6 +1480,44 @@ entry: ret void } +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { +entry: + call void @__quantum__qis__k__body(%Qubit* %q) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { +entry: + call void @__quantum__qis__k__body(%Qubit* %q) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) 
+ ret void +} + +define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) + call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) + ret void +} + define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { entry: call void @__quantum__qis__x__body(%Qubit* %qubit) @@ -1492,6 +1548,18 @@ entry: ret void } +define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) + ret i1 %0 +} + +define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { +entry: + %0 = call i1 @__quantum__qis__isnan__body(double %d) + ret i1 %0 +} + define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { entry: call void @__quantum__rt__array_update_alias_count(%Array* %bases, i64 1) @@ -1502,92 +1570,24 @@ entry: ret %Result* %0 } -declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) - -define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isinf__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isinf__body(double) - define double @Microsoft__Quantum__Intrinsic__NAN__body() { entry: %0 = call double @__quantum__qis__nan__body() ret double %0 } -declare double @__quantum__qis__nan__body() - -define i1 @Microsoft__Quantum__Intrinsic__IsNan__body(double %d) { +define i1 @Microsoft__Quantum__Intrinsic__IsInf__body(double %d) { entry: - %0 = call i1 @__quantum__qis__isnan__body(double %d) + %0 = call i1 @__quantum__qis__isinf__body(double %d) ret i1 %0 } -declare i1 @__quantum__qis__isnan__body(double) - define double @Microsoft__Quantum__Intrinsic__INFINITY__body() { entry: %0 = call double @__quantum__qis__infinity__body() ret double %0 } -declare double @__quantum__qis__infinity__body() - -define i1 @Microsoft__Quantum__Intrinsic__IsNegativeInfinity__body(double %d) { -entry: - %0 = call i1 @__quantum__qis__isnegativeinfinity__body(double %d) - ret i1 %0 -} - -declare i1 @__quantum__qis__isnegativeinfinity__body(double) - -define void @Microsoft__Quantum__Intrinsic__K__body(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__adj(%Qubit* %q) { -entry: - call void @__quantum__qis__k__body(%Qubit* %q) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctl(%Array* %__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -define void @Microsoft__Quantum__Intrinsic__K__ctladj(%Array* %__controlQubits__, %Qubit* %q) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 1) - call void @__quantum__qis__k__ctl(%Array* %__controlQubits__, %Qubit* %q) - call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i64 -1) - ret void -} - -declare double @__quantum__qis__arctan2__body(double, double) - -define double @Microsoft__Quantum__Math__PI__body() { -entry: - ret double 0x400921FB54442D18 -} - -declare double @__quantum__qis__sqrt__body(double) - -define double 
@Microsoft__Quantum__Math__E__body() { -entry: - ret double 0x4005BF0A8B145769 -} - -declare double @__quantum__qis__log__body(double) - declare %String* @__quantum__rt__pauli_to_string(i2) declare %String* @__quantum__rt__string_concatenate(%String*, %String*) @@ -1602,15 +1602,15 @@ entry: ret double %0 } -define double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +define double @Microsoft__Quantum__Math__Sqrt__body(double %d) { entry: - %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + %0 = call double @__quantum__qis__sqrt__body(double %d) ret double %0 } -define double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +define double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { entry: - %0 = call double @__quantum__qis__sqrt__body(double %d) + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) ret double %0 } diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py index 6f109cbfca9..206afb6b6d6 100644 --- a/src/QirRuntime/test/QIR-tracer/generate.py +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -1,4 +1,4 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. +# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import sys, os, platform, subprocess, datetime, shutil @@ -8,7 +8,7 @@ # Accepts arguments: # path to qsc.exe (absolute or rely on Path env) # -# For example: "generate.py qsc.exe" +# For example: "generate.py qsc.exe" or "generate.py c:\qsharp-compiler\qsc.exe" # ============================================================================= # ============================================================================= @@ -18,22 +18,24 @@ def log(message): print(current_time + ": " + message) # ============================================================================= -root_dir = os.path.dirname(os.path.abspath(__file__)) - -# parameters -qsc = sys.argv[1] # argv[0] is the name of this script file - -# find all qs files in this folder -files_to_process = "" -output_file = "tracer-qir" -for file in os.listdir(root_dir): - (file_name, ext) = os.path.splitext(file) - if ext == ".qs": - files_to_process = files_to_process + " " + file - -# Compile as a lib so all functions are retained and don't have to workaround the current limitations of -# @EntryPoint attribute. -command = (qsc + " build --qir s --input " + files_to_process + " --proj " + output_file) -log("Executing: " + command) -subprocess.run(command, shell = True) +if __name__ == '__main__': + # this script is executed as script + root_dir = os.path.dirname(os.path.abspath(__file__)) + + # parameters + qsc = sys.argv[1] # argv[0] is the name of this script file + + # find all qs files in this folder + files_to_process = "" + output_file = "tracer-qir" + for file in os.listdir(root_dir): + (file_name, ext) = os.path.splitext(file) + if ext == ".qs": + files_to_process = files_to_process + " " + file + + # Compile as a lib so all functions are retained and don't have to workaround the current limitations of + # @EntryPoint attribute. 
+ command = (qsc + " build --qir s --input " + files_to_process + " --proj " + output_file) + log("Executing: " + command) + subprocess.run(command, shell = True) diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp index c4320f8ae17..5843217d1c8 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp @@ -11,8 +11,8 @@ namespace TracerUser { -const std::unordered_map g_operationNames = {{0, "X"}, {1, "CX"}, {2, "MCX"}, - {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/}; +const std::unordered_map g_operationNames = { + {0, "X"}, {1, "CX"}, {2, "MCX"}, {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/}; } namespace Microsoft diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp index 7c1b654c43e..8163231286d 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp @@ -8,20 +8,10 @@ #include #include -namespace Microsoft -{ -namespace Quantum -{ - using OpId = int; - using Time = int; - using Duration = int; - using LayerId = size_t; -} -} +#include "TracerTypes.hpp" namespace TracerUser { -using OpId = int; extern const std::unordered_map g_operationNames; } // namespace TracerUser diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs index ec8c48888ff..1e23bf5f613 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -3,6 +3,7 @@ namespace Microsoft.Quantum.Testing.Tracer { open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Tracer; operation TestCoreIntrinsics() : Unit { use qs = Qubit[3]; @@ -19,7 +20,7 @@ namespace Microsoft.Quantum.Testing.Tracer { S(qs[1]); T(qs[2]); - Barrier(42, 1); + Barrier(42, 0); Adjoint X(qs[0]); Adjoint Y(qs[0]); diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs index d721f750c9b..19f7d51abde 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-target.qs +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -26,8 +26,83 @@ namespace Microsoft.Quantum.Instructions { operation joint_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { body intrinsic; } + + // Operations, used in Hadamard frame tracking + @Inline() + operation Tz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(11, 1, qb); } + adjoint (...) { single_qubit_op(11, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(12, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(12, 1, ctls, qb); } + } + + @Inline() + operation Tx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(13, 1, qb); } + adjoint (...) { single_qubit_op(13, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(14, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(14, 1, ctls, qb); } + } + + + @Inline() + operation Sz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(15, 1, qb); } + adjoint (...) { single_qubit_op(15, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(16, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(16, 1, ctls, qb); } + } + + @Inline() + operation Sx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(17, 1, qb); } + adjoint (...) { single_qubit_op(17, 1, qb); } + controlled (ctls, ...) 
{ single_qubit_op_ctl(18, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(18, 1, ctls, qb); } + } + + @Inline() + operation Mz(qb : Qubit) : Result { + body (...) { return single_qubit_measure(100, 1, qb); } + } + + @Inline() + operation Mx(qb : Qubit) : Result { + body (...) { return single_qubit_measure(101, 1, qb); } + } + + @Inline() + operation Mzz(qubits : Qubit[]) : Result { + body (...) { return joint_measure(102, 1, qubits); } + } + + @Inline() + operation Mxz(qubits : Qubit[]) : Result { + body (...) { return joint_measure(103, 1, qubits); } + } + + @Inline() + operation Mzx(qubits : Qubit[]) : Result { + body (...) { return joint_measure(104, 1, qubits); } + } + + @Inline() + operation Mxx(qubits : Qubit[]) : Result { + body (...) { return joint_measure(105, 1, qubits); } + } } +namespace Microsoft.Quantum.Tracer { + + @TargetInstruction("inject_global_barrier") + operation Barrier(id : Int, duration : Int) : Unit { + body intrinsic; + } +} namespace Microsoft.Quantum.Intrinsic { @@ -83,58 +158,22 @@ namespace Microsoft.Quantum.Intrinsic { controlled (ctls, ...) { Phys.single_qubit_op_ctl(10, 1, ctls, qb); } } - @Inline() - operation Tz(qb : Qubit) : Unit - is Adj + Ctl { - body (...) { Phys.single_qubit_op(11, 1, qb); } - adjoint (...) { Phys.single_qubit_op(11, 1, qb); } - controlled (ctls, ...) { Phys.single_qubit_op_ctl(12, 1, ctls, qb); } - controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(12, 1, ctls, qb); } - } - - @Inline() - operation Tx(qb : Qubit) : Unit - is Adj + Ctl { - body (...) { Phys.single_qubit_op(13, 1, qb); } - adjoint (...) { Phys.single_qubit_op(13, 1, qb); } - controlled (ctls, ...) { Phys.single_qubit_op_ctl(14, 1, ctls, qb); } - controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(14, 1, ctls, qb); } - } - @Inline() operation T(qb : Qubit) : Unit is Adj + Ctl { - body (...) { Tz(qb); } - adjoint (...) { Adjoint Tz(qb); } - controlled (ctls, ...) { Controlled Tz(ctls, qb); } - controlled adjoint (ctls, ...) { Controlled Adjoint Tz(ctls, qb); } - } - - @Inline() - operation Sz(qb : Qubit) : Unit - is Adj + Ctl { - body (...) { Phys.single_qubit_op(15, 1, qb); } - adjoint (...) { Phys.single_qubit_op(15, 1, qb); } - controlled (ctls, ...) { Phys.single_qubit_op_ctl(16, 1, ctls, qb); } - controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(16, 1, ctls, qb); } - } - - @Inline() - operation Sx(qb : Qubit) : Unit - is Adj + Ctl { - body (...) { Phys.single_qubit_op(17, 1, qb); } - adjoint (...) { Phys.single_qubit_op(17, 1, qb); } - controlled (ctls, ...) { Phys.single_qubit_op_ctl(18, 1, ctls, qb); } - controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(18, 1, ctls, qb); } + body (...) { Phys.Tz(qb); } + adjoint (...) { Adjoint Phys.Tz(qb); } + controlled (ctls, ...) { Controlled Phys.Tz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Phys.Tz(ctls, qb); } } @Inline() operation S(qb : Qubit) : Unit is Adj + Ctl { - body (...) { Sz(qb); } - adjoint (...) { Adjoint Sz(qb); } - controlled (ctls, ...) { Controlled Sz(ctls, qb); } - controlled adjoint (ctls, ...) { Controlled Adjoint Sz(ctls, qb); } + body (...) { Phys.Sz(qb); } + adjoint (...) { Adjoint Phys.Sz(qb); } + controlled (ctls, ...) { Controlled Phys.Sz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Phys.Sz(ctls, qb); } } @Inline() @@ -164,39 +203,9 @@ namespace Microsoft.Quantum.Intrinsic { controlled adjoint (ctls, ...) 
{ Phys.single_qubit_op_ctl(25, 1, ctls, qb); } } - @Inline() - operation Mz(qb : Qubit) : Result { - body (...) { return Phys.single_qubit_measure(100, 1, qb); } - } - - @Inline() - operation Mx(qb : Qubit) : Result { - body (...) { return Phys.single_qubit_measure(101, 1, qb); } - } - @Inline() operation M(qb : Qubit) : Result { - body (...) { return Mz(qb); } - } - - @Inline() - operation Mzz(qubits : Qubit[]) : Result { - body (...) { return Phys.joint_measure(102, 1, qubits); } - } - - @Inline() - operation Mxz(qubits : Qubit[]) : Result { - body (...) { return Phys.joint_measure(103, 1, qubits); } - } - - @Inline() - operation Mzx(qubits : Qubit[]) : Result { - body (...) { return Phys.joint_measure(104, 1, qubits); } - } - - @Inline() - operation Mxx(qubits : Qubit[]) : Result { - body (...) { return Phys.joint_measure(105, 1, qubits); + body (...) { return Phys.Mz(qb); } } @Inline() @@ -222,22 +231,16 @@ } // Specialize for two-qubit measurements: Mxx, Mxz, Mzx, Mzz - elif paulis[0] == PauliX and paulis[1] == PauliX { set res = Phys.joint_measure(108, 1, qubits); } - elif paulis[0] == PauliX and paulis[1] == PauliZ { set res = Phys.joint_measure(109, 1, qubits); } - elif paulis[0] == PauliZ and paulis[1] == PauliX { set res = Phys.joint_measure(110, 1, qubits); } - elif paulis[0] == PauliZ and paulis[1] == PauliZ { set res = Phys.joint_measure(111, 1, qubits); } + elif paulis[0] == PauliX and paulis[1] == PauliX { set res = Phys.Mxx(qubits); } + elif paulis[0] == PauliX and paulis[1] == PauliZ { set res = Phys.Mxz(qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliX { set res = Phys.Mzx(qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliZ { set res = Phys.Mzz(qubits); } // shouldn't get here return res; } } - - @TargetInstruction("inject_global_barrier") - operation Barrier(id : Int, duration : Int) : Unit { - body intrinsic; - } - // operation SWAP(a : Qubit, b : Qubit) : Unit // is Adj { // body intrinsic; From fef6eadce86555291309eb70f8ac7f6475884bf1 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Thu, 18 Feb 2021 10:26:45 -0800 Subject: [PATCH 24/27] More readme touches --- src/QirRuntime/lib/Tracer/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 7e7e6f646ea..987a59d9050 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -8,7 +8,7 @@ The purpose of the Resource Tracer is to provide efficient and flexible way to e To run against the tracer, the quantum program should comply with the [QIR specifications](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR) as well as: -1. convert _each_ used intrinsic operation into one of the _qis_ operations supported by the tracer (see the list below); +1. convert _each_ used intrinsic operation into one of the Quantum Instruction Set (_qis_) operations supported by the + tracer (see the last section of this readme); 1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates would cover only one branch of the execution); 1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated 1. (_optional_) provide description of mapping for frame tracking; 1.
(_optional_) provide names of operations for output (in the form of `tracer-config.hpp|cpp` files). -The last provisions - The Resource Tracer will consist of: 1. the bridge for the `__quantum__qis__*` methods listed below; From 13db7a9c70b9ffbefcc9155cbf68fc2f53802573 Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Thu, 18 Feb 2021 14:14:22 -0800 Subject: [PATCH 25/27] Post-merge build fixes, remove now-unnecessary factory stub --- src/QirRuntime/lib/Tracer/README.md | 16 ++++++++-------- src/QirRuntime/lib/Tracer/tracer-qis.cpp | 2 +- .../test/QIR-tracer/qir-tracer-driver.cpp | 2 +- src/QirRuntime/test/QIR-tracer/tracer-config.cpp | 13 ------------- 4 files changed, 10 insertions(+), 23 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 987a59d9050..0a11d46f895 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -44,11 +44,11 @@ Notice, that gate 9 is dropped because it cannot cross the barrier to be added i ### Definitions ### -___Time___ is an integer-valued function on all quantum operations in a program (gates, measurements, - qubits allocation/release). For each gate there are start and end times. For each qubit, there are times when the qubit - is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits the gate uses. - If two gates or measurements use the same qubit, one of the gates must have start time greater than or equal to the end - time of the other. +Each quantum operation in a program can be assigned an integer value, which we'll call its ___start time___. Some + operations might have non-zero duration, so they will also have an ___end time___. For each qubit, there are also times + when the qubit is allocated and released. The start time of a gate cannot be less than the allocation time of any of the qubits + the gate is using. If two gates or measurements use the same qubit, one of the gates must have a start time greater than + or equal to the end time of the other. We'll call a particular assignment of times across a program its ___time function___. A sequentially executed quantum program can be assigned a trivial time function, when all quantum operations have duration of 1 and unique start times, ordered to match the flow of the program. Layering compresses the timeline by @@ -66,15 +66,15 @@ A sequential program can be trivially layered such that each layer contains exac zero time to execute, those gates can be added to a layer even if they act on the same qubit another gate in this layer is acting on and have to be executed sequentially within the layer. -_Definition_: A ___barrier___ is a layer that no operations can be added into. - - ### The Resource Tracer's Layering Algorithm ### As the tracer is executing a sequential quantum program, it will compute a time function and corresponding layering using the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might be done differently, as long as the resulting layering is the same as if running the conceptual algorithm. +A ___barrier___ is a layer that acts as if it contained all currently allocated qubits; no operations can be added + into it. + A user can inject _barriers_ by calling the `__quantum__qis__global_barrier` function. The user can choose the duration of a barrier, which affects the start time of the following layers, but no operations will be added to a barrier, regardless of its duration.
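To make the barrier semantics above concrete, here is a minimal C++ sketch of one way a barrier layer could be modeled. It is only an illustration under assumed names (`Layer`, `LayerTracker`, `InjectGlobalBarrier` are hypothetical); the `Time`, `Duration`, and `LayerId` aliases mirror the ones defined in the tracer's headers.

```cpp
// Illustrative sketch only -- assumed structures, not the tracer's real API.
// A barrier is modeled as a layer that accepts no operations; its duration
// merely shifts the start time of every layer created after it.
#include <cstddef>
#include <vector>

using Time = int;
using Duration = int;
using LayerId = size_t;

struct Layer
{
    Time startTime = 0;
    Duration duration = 0;
    bool isBarrier = false; // when true, the layering never places ops here
};

struct LayerTracker
{
    std::vector<Layer> layers;

    Time EndTime() const
    {
        return layers.empty() ? 0 : layers.back().startTime + layers.back().duration;
    }

    // Rough model of __quantum__qis__global_barrier: append a layer that
    // stays empty forever; even a zero-duration barrier splits the layering.
    LayerId InjectGlobalBarrier(Duration duration)
    {
        layers.push_back(Layer{EndTime(), duration, /*isBarrier:*/ true});
        return layers.size() - 1;
    }
};
```

In this model, later operations are never placed into (or scanned past) a layer marked `isBarrier`, which is exactly the bound the conceptual layering algorithm below relies on.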
diff --git a/src/QirRuntime/lib/Tracer/tracer-qis.cpp b/src/QirRuntime/lib/Tracer/tracer-qis.cpp index 737807f1036..19422ca4ac3 100644 --- a/src/QirRuntime/lib/Tracer/tracer-qis.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-qis.cpp @@ -4,7 +4,7 @@ #include #include "CoreTypes.hpp" -#include "qirTypes.hpp" +#include "QirTypes.hpp" #include "tracer.hpp" namespace Microsoft diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp index 33dda3ccaea..ba2fd6e361a 100644 --- a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -7,7 +7,7 @@ #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file #include "catch.hpp" -#include "context.hpp" +#include "QirContext.hpp" #include "tracer-config.hpp" #include "tracer.hpp" diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp index 5843217d1c8..eab02e878af 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp @@ -14,16 +14,3 @@ namespace TracerUser const std::unordered_map g_operationNames = { {0, "X"}, {1, "CX"}, {2, "MCX"}, {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/}; } - -namespace Microsoft -{ -namespace Quantum -{ - - std::unique_ptr CreateFullstateSimulator() - { - throw std::logic_error("Tracer should not instantiate full state simulator"); - } - -} // namespace Quantum -} // namespace Microsoft \ No newline at end of file From 7010f09919c38f53989341d38eb1632051c1fe1c Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Fri, 19 Feb 2021 15:25:44 -0800 Subject: [PATCH 26/27] readme: clarified the layering algorithm --- src/QirRuntime/lib/Tracer/README.md | 80 +++++++++++++------- src/QirRuntime/lib/Tracer/tracer-bridge.ll | 6 +- src/QirRuntime/lib/Tracer/tracer-qis.cpp | 2 +- src/QirRuntime/test/QIR-tracer/tracer-qir.ll | 4 +- 4 files changed, 57 insertions(+), 35 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 0a11d46f895..372126b136c 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -85,7 +85,11 @@ __Conditional execution on measurement results__: The Tracer will execute LLVM I conditionals into corresponding callbacks to the tracer. The tracer will add operations from _both branches_ into the layers it creates to compute the upper bound estimate. -Conditionals, measurements and operations, that open frames, inside conditional callbacks are _not_ supported. +The following operations are _not_ supported inside conditional callbacks and would cause a runtime failure: + +- nested conditional callbacks; +- measurements; +- opening and closing operations of tracked frames (if tracking is set up). __Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules @@ -95,24 +99,33 @@ __Caching__ (lower priority): It might be a huge perf win if the Resource Tracer Note: The tracer assumes that the preferred layer duration is _P_. -1. The first encountered operation of __non-zero__ duration _N_ is added into layer _L(0, max(P,N))_. The value - of _conditional barrier_ variable on the tracer is set to 0. -1. 
When a conditional callback is encountered, the layer _L(t,N)_ of the measurement that produced the result used in the
+   conditional callback is looked up, and the _conditional barrier_ is set to _t + N_. At the end of the conditional callback
    _conditional barrier_ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen
    before or in the same layer as the measurement, even if they don't involve the measured qubits.)
-   TODO: is it OK for later operations to be added to the layers with ops _inside_ conditional branches?
 1. Suppose there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is a single-qubit _op_ of
    duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations).
-   Starting at _L(k, Nk)_ and scanning backwards to _L(conditional barrier, Nb)_ find the _first_ layer that contains an
-   operation that acts on the qubit of _op_. Add _op_ into this layer. If no such layer is found, add _op_ to the list of
-   pending operations on the qubit. At the end of the program still pending operations are ignored.
+
+   - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional barrier, Nb)_ until finding a layer _L(t,Nt)_
+     such that _Q(t,Nt)_ contains the qubit of _op_.
+   - Add _op_ into this layer.
+   - If no such layer is found, add _op_ to the list of pending operations on the qubit.
+   - At the end of the program, still-pending operations will be ignored.
+
 1. Suppose there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is _op_ of duration _N > 0_
    or it involves more than one qubit.
-   Starting at _L(k, Nk)_ and scanning backwards to _L(conditional barrier, Nb)_ find the _last_ layer _L(t, Nt)_ such
-   that _Qubits(t, Nt)_ don't contain any of the _op_'s qubits and find the _first_ layer _L(w, Nw)_ such that
-   Qubits(w, Nw) contains some of _op_'s qubits but Nw + N <= P. Add _op_ into one of the two layer
-   with later time. If neither such layers is found, add _op_ into a new layer _L(k+Nk, max(P, N))_. Add the pending
-   operations of all involved qubits into the same layer and clear the pending lists.
+
+   - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional barrier,Nb)_ until finding a layer _L(w,Nw)_
+     such that _Qubits(w,Nw)_ contains some of _op_'s qubits.
+   - If _L(w,Nw)_ is found and _op_ can be added into it without increasing the layer's duration, add _op_ into
+     _L(w,Nw)_, otherwise set _w = conditional barrier_.
+   - If _op_ hasn't been added to a layer, scan from [boundaries included] _L(w,Nw)_ to _L(k,Nk)_ until finding
+     a layer _L(t,Nt)_ such that _Qubits(t, Nt)_ don't contain any of the _op_'s qubits and _N <= Nt_.
+   - If _L(t,Nt)_ is found, add _op_ into this layer.
+   - If _op_ hasn't been added to a layer, add _op_ into a new layer _L(k+Nk, max(P, N))_.
+   - Add the pending operations of all involved qubits into the same layer and clear the pending lists.
 
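The zero-duration rule above can be modeled in a few lines of C++. The sketch below uses invented names (`Schedule`, `AddZeroDurationOp`) and, for simplicity, tracks the conditional barrier as a layer index rather than a time; the tracer's real data structures differ, and this is only the conceptual algorithm the readme describes.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <vector>

// Toy model of the backward scan for a zero-duration single-qubit operation
// (all names invented for this sketch).
struct Layer
{
    int32_t startTime;
    int32_t duration;
    std::set<int32_t> qubits;  // Qubits(t, Nt): qubits already used in the layer
    int32_t opCount;           // number of operations counted into the layer
};

struct Schedule
{
    std::vector<Layer> layers;
    size_t conditionalBarrier = 0;          // index of the earliest layer we may touch
    std::map<int32_t, int32_t> pendingOps;  // per-qubit count of not-yet-placed zero-duration ops

    void AddZeroDurationOp(int32_t qubit)
    {
        // Scan from the last layer back to the conditional barrier (boundaries included).
        for (size_t i = layers.size(); i-- > conditionalBarrier;)
        {
            if (layers[i].qubits.count(qubit) != 0)
            {
                layers[i].opCount++;  // costs no time, so the layer is otherwise unchanged
                return;
            }
        }
        pendingOps[qubit]++;  // no layer uses this qubit yet: park the op
    }
};

int main()
{
    Schedule s;
    s.layers.push_back(Layer{0, 1, {0, 1}, 2});  // L(0,1) uses qubits 0 and 1
    s.layers.push_back(Layer{1, 1, {2}, 1});     // L(1,1) uses qubit 2

    s.AddZeroDurationOp(0);  // lands in L(0,1)
    s.AddZeroDurationOp(3);  // qubit 3 is unused anywhere: goes to the pending list

    std::cout << s.layers[0].opCount << " " << s.pendingOps[3] << "\n";  // prints "3 1"
    return 0;
}
```

Note how an operation on a qubit that no layer has used yet goes to the per-qubit pending list; per the non-zero-duration rule, those pending operations are later flushed into whichever layer the next non-zero-duration (or multi-qubit) operation on that qubit lands in.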
## Special handling of SWAP ##
 
@@ -145,22 +158,31 @@ __TBD__: C++ definitions of the structure above + the interface to register fram
 
 The tracer will have options to output the estimates to the command line or into a file, specified by the user. In both
 cases the output will be in the same format:
 
-- Tab separated, where:
+- column separator is configurable (the regular expressions below use a comma as the separator)
+- the first column specifies the time _t_ of a layer _L(t, n)_ or of a barrier
+- the second column contains the optional name of the layer or the barrier
+- the remaining columns contain counts per operation in the layer (all zeros in case of a barrier)
 
-  - the first column specifies the time _t_ of a layer _L(t, n)_
-  - the second column contains an optional name of the layer, that corresponds to a global barrier
-  - the remaining columns contain counts per operation in the layer
-
-- The first row is a header row: `layer_id\tname(\t[a-zA-Z]+)*`, where specific operation names are listed, such as
-  CNOT, Mz, etc., if provided by the user (if not provided, the header row will list operation ids).
-- All following rows contain statistics per layer: `[0-9]+\t[a-zA-Z]*(\t([0-9]*))*`.
+- The first row is a header row: `layer_id,name(,[0-9a-zA-Z]+)*`. The fragment `(,[0-9a-zA-Z]+)*` lists operation
+  names or their ids if the names weren't provided by the user.
+- The following rows contain statistics per layer: `[0-9]+,[a-zA-Z]*(,([0-9]*))*`.
 - The rows are sorted in order of increasing layer time.
-- Zero counts for any of the statistics _might_ be replaced with empty string.
-- The global barrier layer lists the name and no statistics.
+- Zero counts for the statistics _can_ be replaced with an empty string.
 
 The map of operation ids to names can be passed to the tracer's constructor as `std::unordered_map`. The
 mapping can be partial; ids will be used in the output for unnamed operations.
 
+Example of valid output:
+
+```csv
+layer_id,name,Y,Z,5
+0,,0,1,0
+1,,0,0,1
+2,b,0,0,0
+4,,0,1,0
+8,,1,0,0
+```
+
 ## Depth vs width optimizations ##
 
 TBD but lower priority.
 
@@ -169,15 +191,15 @@
 
 | Signature                                              | Description                                                   |
 | :----------------------------------------------------- | :------------------------------------------------------------ |
-| `void __quantum__qis__inject_global_barrier(i32 %id, i32 %duration)` | Function to insert a global barrier between layers. The first argument is the id of the barrier and the second item specifies the duration of the barrier. See [Layering](#layering) section for details. |
+| `void __quantum__qis__inject_barrier(i32 %id, i32 %duration)` | Function to insert a barrier. The first argument is the id of the barrier, which can be used to map it to a user-friendly name in the output, and the second argument specifies the duration of the barrier. See the [Layering](#layering) section for details. |
 | `void __quantum__qis__on_module_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) option to cache estimates for a module, executed multiple times. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately before the body code of a Q# `operation`. |
 | `void __quantum__qis__on_module_end(i64 %id)` | Function to identify the end of a quantum module. 
The argument is a unique _id_ of the module and must match the _id_ supplied at the start of the module. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately after the body code of a Q# `operation`. |
| `void __quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is the duration to be assigned to the particular invocation of the operation. |
| `void __quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits. |
-| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit. |
-| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits. |
-| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user might assign different operation ids for different measurement bases. |
-| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user might assign different operation ids for different measurement bases. |
+| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with a single target qubit and the `%ctls` array of controls. |
+| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits and the `%ctls` array of controls. |
+| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user can assign different operation ids for different measurement bases. |
+| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint measurements of qubits. The user can assign different operation ids for different measurement bases. |
 | `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. 
| | TODO: handling of conditionals on measurement results | | diff --git a/src/QirRuntime/lib/Tracer/tracer-bridge.ll b/src/QirRuntime/lib/Tracer/tracer-bridge.ll index b851de0bf43..1f0fa216c76 100644 --- a/src/QirRuntime/lib/Tracer/tracer-bridge.ll +++ b/src/QirRuntime/lib/Tracer/tracer-bridge.ll @@ -25,7 +25,7 @@ declare void @quantum__qis__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT declare void @quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*) declare void @quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*) declare void @quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*) -declare void @quantum__qis__inject_global_barrier(i32 %id, i32 %duration) +declare void @quantum__qis__inject_barrier(i32 %id, i32 %duration) declare %class.RESULT* @quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT*) declare %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray*) @@ -62,9 +62,9 @@ define void @__quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* ret void } -define void @__quantum__qis__inject_global_barrier(i32 %id, i32 %duration) +define void @__quantum__qis__inject_barrier(i32 %id, i32 %duration) { - call void @quantum__qis__inject_global_barrier(i32 %id, i32 %duration) + call void @quantum__qis__inject_barrier(i32 %id, i32 %duration) ret void } diff --git a/src/QirRuntime/lib/Tracer/tracer-qis.cpp b/src/QirRuntime/lib/Tracer/tracer-qis.cpp index 19422ca4ac3..ed7a5eeb370 100644 --- a/src/QirRuntime/lib/Tracer/tracer-qis.cpp +++ b/src/QirRuntime/lib/Tracer/tracer-qis.cpp @@ -49,7 +49,7 @@ extern "C" reinterpret_cast(targets->buffer)); } - void quantum__qis__inject_global_barrier(int32_t id, int32_t duration) // NOLINT + void quantum__qis__inject_barrier(int32_t id, int32_t duration) // NOLINT { (void)tracer->InjectGlobalBarrier(id, duration); } diff --git a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll index ea46d25e9a8..73d9cf34372 100644 --- a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -895,7 +895,7 @@ entry: %23 = bitcast i8* %22 to %Qubit** %qb__9 = load %Qubit*, %Qubit** %23 call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) - call void @__quantum__qis__inject_global_barrier(i64 42, i64 1) + call void @__quantum__qis__inject_barrier(i64 42, i64 1) %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) %25 = bitcast i8* %24 to %Qubit** %qb__11 = load %Qubit*, %Qubit** %25 @@ -1181,7 +1181,7 @@ declare %Qubit* @__quantum__rt__qubit_allocate() declare %Array* @__quantum__rt__qubit_allocate_array(i64) -declare void @__quantum__qis__inject_global_barrier(i64, i64) +declare void @__quantum__qis__inject_barrier(i64, i64) declare void @__quantum__rt__qubit_release(%Qubit*) From b0f4dd0c1f9218650c86926808d20f0af259c02d Mon Sep 17 00:00:00 2001 From: Irina Yatsenko Date: Fri, 19 Feb 2021 17:49:51 -0800 Subject: [PATCH 27/27] Readme: finishing touches --- src/QirRuntime/lib/Tracer/README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md index 372126b136c..3ba42c9ca35 100644 --- a/src/QirRuntime/lib/Tracer/README.md +++ b/src/QirRuntime/lib/Tracer/README.md @@ -100,16 +100,16 @@ __Caching__ (lower priority): It might be a huge perf win if the Resource Tracer Note: The 
tracer assumes that the preferred layer duration is _P_.
 
 1. The first encountered operation of duration _N_, where either _N > 0_ or the operation involves multiple qubits, is
-   added into layer _L(0, max(P,N))_. The value of _conditional barrier_ variable on the tracer is set to 0.
+   added into layer _L(0, max(P,N))_. The value of the _conditional fence_ variable on the tracer is set to 0.
 1. When a conditional callback is encountered, the layer _L(t,N)_ of the measurement that produced the result used in the
-   conditional callback is looked up, and the _conditional barrier_ is set to _t + N_. At the end of the conditional callback
-   _conditional barrier_ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen
+   conditional callback is looked up, and the _conditional fence_ is set to _t + N_. At the end of the conditional callback
+   the _conditional fence_ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen
    before or in the same layer as the measurement, even if they don't involve the measured qubits.)
 1. Suppose there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is a single-qubit _op_ of
    duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations).
 
-   - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional barrier, Nb)_ until finding a layer _L(t,Nt)_
-     such that _Q(t,Nt)_ contains the qubit of _op_.
+   - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional fence,Nf)_ until finding a layer _L(t,Nt)_
+     such that _Qubits(t,Nt)_ contains the qubit of _op_.
    - Add _op_ into this layer.
    - If no such layer is found, add _op_ to the list of pending operations on the qubit.
    - At the end of the program, still-pending operations will be ignored.
 
 1. Suppose there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is _op_ of duration _N > 0_
    or it involves more than one qubit.
 
-   - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional barrier,Nb)_ until finding a layer _L(w,Nw)_
+   - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional fence,Nf)_ until finding a layer _L(w,Nw)_
     such that _Qubits(w,Nw)_ contains some of _op_'s qubits.
    - If _L(w,Nw)_ is found and _op_ can be added into it without increasing the layer's duration, add _op_ into
-     _L(w,Nw)_, otherwise set _w = conditional barrier_.
+     _L(w,Nw)_, otherwise set _L(w,Nw) = L(conditional fence,Nf)_.
    - If _op_ hasn't been added to a layer, scan from [boundaries included] _L(w,Nw)_ to _L(k,Nk)_ until finding
-     a layer _L(t,Nt)_ such that _Qubits(t, Nt)_ don't contain any of the _op_'s qubits and _N <= Nt_.
+     a layer _L(t,Nt)_ such that _N <= Nt_ (notice that this layer cannot contain any of _op_'s qubits).
    - If _L(t,Nt)_ is found, add _op_ into this layer.
   - If _op_ hasn't been added to a layer, add _op_ into a new layer _L(k+Nk, max(P, N))_.
-   - Add the pending operations of all involved qubits into the same layer and clear the pending lists.
+   - Add the pending operations of all _op_'s qubits into the same layer and clear the pending lists of these qubits.
 
 ## Special handling of SWAP ##
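Before moving on to SWAP, one more illustration of the layering machinery: the _conditional fence_ bookkeeping described in the hunk above amounts to a small amount of state. The following C++ sketch uses invented names (`FenceTracker`, `LayerSpan`) and is an illustration under those assumptions, not the tracer's actual interface; in the real runtime this logic would be driven by the conditional-callback qis entry points.

```cpp
#include <cstdint>

// Minimal model of a layer's position on the timeline (invented for this sketch).
struct LayerSpan
{
    int32_t startTime;
    int32_t duration;
};

class FenceTracker
{
    int32_t conditionalFence = 0;  // no op may be placed into a layer starting before this time

public:
    // On entering a conditional callback, the fence moves to the end time of
    // the layer L(t,N) that contains the measurement which produced the result.
    void EnterConditionalCallback(const LayerSpan& measurementLayer)
    {
        conditionalFence = measurementLayer.startTime + measurementLayer.duration;
    }

    // On leaving the callback, the fence is reset to 0.
    void ExitConditionalCallback() { conditionalFence = 0; }

    // A layer is usable for placement only if it does not start before the
    // fence, so nothing conditioned on a measurement can land in (or before)
    // the measurement's own layer, even on unrelated qubits.
    bool CanPlaceInto(const LayerSpan& layer) const
    {
        return layer.startTime >= conditionalFence;
    }
};

int main()
{
    FenceTracker tracker;
    LayerSpan mzLayer{4, 1};                    // the measurement sits in L(4,1)
    tracker.EnterConditionalCallback(mzLayer);  // fence becomes 5

    LayerSpan earlier{2, 1};
    LayerSpan later{5, 1};
    bool a = tracker.CanPlaceInto(earlier);  // false: before the measurement
    bool b = tracker.CanPlaceInto(later);    // true: starts at or after the fence
    tracker.ExitConditionalCallback();
    return (!a && b) ? 0 : 1;
}
```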