diff --git a/src/QirRuntime/CMakeLists.txt b/src/QirRuntime/CMakeLists.txt index 28ea5e8c349..63221cd4d5b 100644 --- a/src/QirRuntime/CMakeLists.txt +++ b/src/QirRuntime/CMakeLists.txt @@ -114,12 +114,13 @@ endmacro(compile_from_qir) if (WIN32) set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-rt-u.lib") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/bridge-qis-u.lib") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/tracer-bridge-u.lib") else() set(QIR_BRIDGE_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-rt-u.a") set(QIR_BRIDGE_QIS_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/QIR/libbridge-qis-u.a") + set(QIR_BRIDGE_TRACER_UTILITY_LIB "${PROJECT_BINARY_DIR}/lib/Tracer/libtracer-bridge-u.a") endif() - add_subdirectory(lib) add_subdirectory(test) diff --git a/src/QirRuntime/lib/CMakeLists.txt b/src/QirRuntime/lib/CMakeLists.txt index 789d0ba7503..ad1361d4074 100644 --- a/src/QirRuntime/lib/CMakeLists.txt +++ b/src/QirRuntime/lib/CMakeLists.txt @@ -1,3 +1,4 @@ add_subdirectory(QIR) add_subdirectory(Simulators) +add_subdirectory(Tracer) add_subdirectory(qdk) diff --git a/src/QirRuntime/lib/Tracer/CMakeLists.txt b/src/QirRuntime/lib/Tracer/CMakeLists.txt new file mode 100644 index 00000000000..78b5adf145b --- /dev/null +++ b/src/QirRuntime/lib/Tracer/CMakeLists.txt @@ -0,0 +1,20 @@ +# build the utility lib for tracer's bridge +compile_from_qir(tracer-bridge tracer-bridge) + +# build the native part of the tracer +set(component_name "tracer") + +set(source_files + "tracer-qis.cpp" + "tracer.cpp" +) + +set(includes + "${public_includes}" + "${PROJECT_SOURCE_DIR}/lib/QIR" +) + +add_library(${component_name} STATIC ${source_files}) +target_include_directories(${component_name} PUBLIC ${includes}) + +add_dependencies(${component_name} tracer-bridge) diff --git a/src/QirRuntime/lib/Tracer/README.md b/src/QirRuntime/lib/Tracer/README.md new file mode 100644 index 00000000000..3ba42c9ca35 --- /dev/null +++ 
b/src/QirRuntime/lib/Tracer/README.md @@ -0,0 +1,214 @@ +# Resource Tracer Design Document # + +The purpose of the Resource Tracer is to provide an efficient and flexible way to estimate resources of a quantum program + in QIR representation. The estimates are calculated by simulating execution of the program (as opposed to the static + analysis). Please see [Resource Estimator](https://docs.microsoft.com/en-us/azure/quantum/user-guide/machines/resources-estimator) + for more background on resource estimation for quantum programs. + +To run against the tracer, the quantum program should comply with the + [QIR specifications](https://github.com/microsoft/qsharp-language/tree/main/Specifications/QIR) as well as: + +1. convert _each_ used intrinsic operation into one of the Quantum Instruction Set (_qis_) operations supported by the + tracer (see the last section of this readme); +1. (_optional_) provide callbacks for handling of conditional branches on a measurement (if not provided, the estimates + would cover only one branch of the execution); +1. (_optional_) provide callbacks for start/end of quantum operations (if not provided, all operations will be treated + as inlined as if the whole program consisted of a single operation); +1. (_optional_) provide callbacks for global barriers; +1. (_optional_) provide a description of the mapping for frame tracking; +1. (_optional_) provide names of operations for output (in the form of `tracer-config.hpp|cpp` files). + +The Resource Tracer will consist of: + +1. the bridge for the `__quantum__qis__*` methods listed below; +2. the native implementation to back the `__quantum__qis__*` methods; +3. the logic for partitioning gates into layers; +4. the logic for frame tracking; +5. output of the collected statistics; +6. (_lower priority_) the scheduling component to optimize depth and/or width of the circuit. + +## Layering ## + +One of the goals of the tracer is to compute which of the quantum operations can be executed in parallel. 
Further in + this section we provide the definitions of used concepts and the description of how we group the operations into + _layers_, however, we hope that the following example of layering is intuitively clear. + +### Example of layering ### + +The diagram below shows an example of how a sequential program, represented by the left circuit, could be layered. The gates in light gray are of duration zero, the preferred layer duration is 1, and the barrier, + represented by a vertical squiggle, is set to have duration 0. + +![layering example](layering_example.png?raw=true "Layering example diagram") + +Notice that gate 9 is dropped because it cannot cross the barrier to be added into L(2,1). + +### Definitions ### + +Each quantum operation in a program can be assigned an integer value, which we'll call its ___start time___. Some + operations might have non-zero duration, so they will also have ___end time___. For each qubit, there are also times + when the qubit is allocated and released. Start time of a gate cannot be less than allocation time of any of the qubits + the gate is using. If two gates or measurements use the same qubit, one of the gates must have start time greater than + or equal to the end time of the other. We'll call a particular assignment of times across a program its ___time function___. + +A sequentially executed quantum program can be assigned a trivial time function, when all quantum operations have + duration of 1 and unique start times, ordered to match the flow of the program. Layering compresses the timeline by + assuming that some operations might be executed simultaneously while allowing for different operations to have various + durations. + +Provided a valid _time_ function for the program a ___layer of duration N at time T, denoted as L(T,N),___ + is a subset of operations in the program such that all of these operations have start time greater or equal _T_ and + finish time less than _T + N_. 
The program is ___layered___ if all gates in it are partitioned into layers, that don't + overlap in time. The union of all qubits that are involved in operations of a given layer, will be denoted _Qubits(T,N)_. + +A sequential program can be trivially layered such that each layer contains exactly one operation. Notice, that the + definition of layer doesn't require the gates to be executed _in parallel_. For example, all gates in a fully sequential + program can be also placed into a single layer L(0, infinity). Some gates might be considered to be very cheap and take + zero time to execute, those gates can be added to a layer even if they act on the same qubit another gate in this layer + is acting on and have to be executed sequentially within the layer. + +### The Resource Tracer's Layering Algorithm ### + +As the tracer is executing a sequential quantum program, it will compute a time function and corresponding layering + using the _conceptual_ algorithm, described below (aka "tetris algorithm"). The actual implementation of layering might + be done differently, as long as the resulting layering is the same as if running the conceptual algorithm. + +A ___barrier___ is a layer that acts as if it was containing all currently allocated qubits and no operation can be added + into it. + +A user can inject _barriers_ by calling `__quantum__qis__global_barrier` function. The user can choose duration of + a barrier which would affect start time of the following layers but no operations will be added to a barrier, + independent of its duration. + +__Conditional execution on measurement results__: The Tracer will execute LLVM IR's branching structures "as is", + depending on the values of the corresponding variables at runtime. To enable estimation of branches that depend on a + measurement result, the source Q# program will have to be authored in such a way that the Q# compiler will translate the + conditionals into corresponding callbacks to the tracer. 
The tracer will add operations from _both branches_ into the + layers it creates to compute the upper bound estimate. + +The following operations are _not_ supported inside conditional callbacks and would cause a runtime failure: + +- nested conditional callbacks; +- measurements; +- opening and closing operations of tracked frames (if tracking is set up). + +__Caching__ (lower priority): It might be a huge perf win if the Resource Tracer could cache statistics for repeated + computations. The Tracer will have an option to cache layering results per quantum module if the boundaries of modules + are treated as barriers. + +#### The conceptual algorithm #### + +Note: The tracer assumes that the preferred layer duration is _P_. + +1. The first encountered operation of duration _N_, where either _N > 0_ or the operation involves multiple qubits, is + added into layer _L(0, max(P,N))_. The value of _conditional fence_ variable on the tracer is set to 0. +1. When conditional callback is encountered, the layer _L(t,N)_ of the measurement that produced the result used in the + conditional callback, is looked up and the _conditional fence_ is set to _t + N_. At the end of the conditional callback + _conditional fence_ is reset to 0. (Effectively, no operations, conditioned on the result of a measurement, can happen + before or in the same layer as the measurement, even if they don't involve the measured qubits.) +1. Suppose, there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is a single-qubit _op_ of + duration __0__ (controlled and multi-qubit operations of duration 0 are treated the same as non-zero operations). + + - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional fence,Nf)_ until find a layer _L(t,Nt)_ + such that _Qubits(t,Nt)_ contains the qubit of _op_. + - Add _op_ into this layer. + - If no such layer is found, add _op_ to the list of pending operations on the qubit. 
+ - At the end of the program still pending operations will be ignored. + +1. Suppose, there are already layers _L(0,N0), ... , L(k,Nk)_ and the operation being executed is _op_ of duration _N > 0_ + or it involves more than one qubit. + + - Scan from [boundaries included] _L(k,Nk)_ to _L(conditional fence,Nf)_ until find a layer _L(w,Nw)_ + such that _Qubits(w,Nw)_ contain some of _op_'s qubits. + - If _L(w,Nw)_ is found and _op_ can be added into it without increasing the layer's duration, add _op_ into + _L(w,Nw)_, otherwise set _L(w,Nw) = L(conditional fence,Nf)_. + - If _op_ hasn't been added to a layer, scan from [boundaries included] _L(w,Nw)_ to _L(k,Nk)_ until find + a layer _L(t,Nt)_ such that _N <= Nt_ (notice, that this layer cannot contain any qubits from _op_). + - If _L(t,Nt)_ is found, add _op_ into this layer. + - If _op_ hasn't been added to a layer, add _op_ into a new layer _L(k+Nk, max(P, N))_. + - Add the pending operations of all _op_'s qubits into the same layer and clear the pending lists of these qubits. + +## Special handling of SWAP ## + +The tracer will provide a way to handle SWAP as, effectively, renaming of the involved qubits. The users will have the + choice of using the special handling versus treating the gate as a standard counted intrinsic. + +## Frame tracking ## + +A user might want to count differently operations that are applied in a different state. For example, if Hadamard gate + is applied to a qubit and then Rz gate, a user might want to count it as if Rz were executed instead. + The frame is closed when the state of the qubit is reset (in Hadamard's case, another Hadamard operator is applied to + the qubit). The user will be able to register the required frame tracking with the tracer via a C++ registration + callback. + +The descriptor of the frame will contain the following information and will be provided to the Tracer when initializing + it in C++. 
+ +- openingOp: the operation id that opens the frame on the qubits this operation is applied to +- closingOp: the operation id that closes the frame on the qubits this operation is applied to +- vector of: { bitmask_ctls, bitmask_targets, operationIdOriginal, operationIdMapped } + +The closing operation will be ignored if the frame on the qubit hasn't been opened. The bitmasks define which of the qubits + should be in an open frame to trigger the mapping. For non-controlled operations the first mask will be ignored. To + begin with, the tracer will support frame mapping for up to 8 control/target qubits. + +__TBD__: C++ definitions of the structure above + the interface to register frame tracking with the Tracer. + +## Output format ## + +The tracer will have options to output the estimates into the command line or into a file, specified by the user. In both + cases the output will be in the same format: + +- column separator is configurable (the regex expressions below use comma as separator) +- the first column specifies the time _t_ of a layer _L(t, n)_ or of a barrier +- the second column contains the optional name of the layer or the barrier +- the remaining columns contain counts per operation in the layer (all zeros in case of a barrier) + +- The first row is a header row: `layer_id,name(,[0-9a-zA-Z]+)*`. The fragment `(,[0-9a-zA-Z]+)*` lists operation + names or their ids if the names weren't provided by the user. +- The following rows contain statistics per layer: `[0-9]+,[a-zA-Z]*(,([0-9]*))*`. +- The rows are sorted in order of increasing layer time. +- Zero counts for the statistics _can_ be replaced with an empty string. + +The map of operation ids to names can be passed to the tracer's constructor as `std::unordered_map`. + The mapping can be partial, ids will be used in the output for unnamed operations. 
+ +Example of valid output: + +```csv +layer_id,name,Y,Z,5 +0,,0,1,0 +1,,0,0,1 +2,b,0,0,0 +4,,0,1,0 +8,,1,0,0 +``` + +## Depth vs width optimizations ## + +TBD but lower priority. + +## List of `__quantum__qis__*` methods, supported by the Tracer ## + +| Signature | Description | +| :---------------------------------------------------- | :----------------------------------------------------------- | +| `void __quantum__qis__inject_barrier(i32 %id, i32 %duration)` | Function to insert a barrier. The first argument is the id of the barrier that can be used to map it to a user-friendly name in the output and the second argument specifies the duration of the barrier. See [Layering](#layering) section for details. | +| `void __quantum__qis__on_module_start(i64 %id)` | Function to identify the start of a quantum module. The argument is a unique _id_ of the module. The tracer will have an option to treat module boundaries as barriers between layers and (_lower priority_) option to cache estimates for a module, executed multiple times. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately before the body code of a Q# `operation`. | +| `void __quantum__qis__on_module_end(i64 %id)` | Function to identify the end of a quantum module. The argument is a unique _id_ of the module and must match the _id_ supplied on start of the module. For example, a call to the function might be inserted into QIR, generated by the Q# compiler, immediately after the body code of a Q# `operation`. | +| `void __quantum__qis__single_qubit_op(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting operations that involve a single qubit. The first argument is the id of the operation. Multiple intrinsics can be assigned the same id, in which case they will be counted together. The second argument is duration to be assigned to the particular invocation of the operation. 
| `void __quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %qs)` | Function for counting operations that involve multiple qubits.| +| `void __quantum__qis__single_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Qubit* %q)` | Function for counting controlled operations with single target qubit and `%ctls` array of controls. | +| `void __quantum__qis__multi_qubit_op__ctl(i32 %id, i32 %duration, %Array* %ctls, %Array* %qs)` | Function for counting controlled operations with multiple target qubits and `%ctls` array of controls. | +| `%Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %q)` | Function for counting measurements of a single qubit. The user can assign different operation ids for different measurement bases. | +| `%Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %qs)` | Function for counting joint-measurements of qubits. The user can assign different operation ids for different measurement bases. | +| `void __quantum__qis__swap(%Qubit* %q1, %Qubit* %q2)` | See [Special handling of SWAP](#special-handling-of-swap) for details. | +| TODO: handling of conditionals on measurement results | | + +_Note on operation ids_: The user is responsible for using operation ids in a consistent manner. Operations with the + same id will be counted by the tracer as the _same_ operation, even across invocations with different numbers of target + qubits or when different functors are applied. + +_Note on mapping Q# intrinsics to the methods above_: Q# compiler will support Tracer as a special target and will let + the user either choose some default mapping or specify their custom mapping. For example, see QIR-tracer tests in + this project (`tracer-target.qs` specifies the mapping). + +The Resource Tracer will reuse the qir-rt library while implementing the qis methods specified above. 
diff --git a/src/QirRuntime/lib/Tracer/layering_example.png b/src/QirRuntime/lib/Tracer/layering_example.png new file mode 100644 index 00000000000..5713843d859 Binary files /dev/null and b/src/QirRuntime/lib/Tracer/layering_example.png differ diff --git a/src/QirRuntime/lib/Tracer/tracer-bridge.ll b/src/QirRuntime/lib/Tracer/tracer-bridge.ll new file mode 100644 index 00000000000..1f0fa216c76 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/tracer-bridge.ll @@ -0,0 +1,85 @@ +; Copyright (c) Microsoft Corporation. +; Licensed under the MIT License. + +;======================================================================================================================= +; QIR types +; +%Array = type opaque +%Qubit = type opaque +%Result = type opaque + + +;======================================================================================================================= +; Native types +; +%class.QUBIT = type opaque +%class.RESULT = type opaque +%struct.QirArray = type opaque + + +;=============================================================================== +; declarations of the native methods this bridge delegates to +; + +declare void @quantum__qis__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT*) +declare void @quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %class.QUBIT*) +declare void @quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray*) +declare void @quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray*, %struct.QirArray*) +declare void @quantum__qis__inject_barrier(i32 %id, i32 %duration) +declare %class.RESULT* @quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT*) +declare %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray*) + +;=============================================================================== +; quantum__trc namespace implementations +; +define void @__quantum__qis__single_qubit_op(i32 %id, i32 %duration, 
%Qubit* %.q) +{ + %q = bitcast %Qubit* %.q to %class.QUBIT* + call void @quantum__qis__single_qubit_op(i32 %id, i32 %duration, %class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Qubit* %.q) +{ + %q = bitcast %Qubit* %.q to %class.QUBIT* + %ctls = bitcast %Array* %.ctls to %struct.QirArray* + call void @quantum__qis__single_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %class.QUBIT* %q) + ret void +} + +define void @__quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %Array* %.qs) +{ + %qs = bitcast %Array* %.qs to %struct.QirArray* + call void @quantum__qis__multi_qubit_op(i32 %id, i32 %duration, %struct.QirArray* %qs) + ret void +} + +define void @__quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %Array* %.ctls, %Array* %.qs) +{ + %ctls = bitcast %Array* %.ctls to %struct.QirArray* + %qs = bitcast %Array* %.qs to %struct.QirArray* + call void @quantum__qis__multi_qubit_op_ctl(i32 %id, i32 %duration, %struct.QirArray* %ctls, %struct.QirArray* %qs) + ret void +} + +define void @__quantum__qis__inject_barrier(i32 %id, i32 %duration) +{ + call void @quantum__qis__inject_barrier(i32 %id, i32 %duration) + ret void +} + +define %Result* @__quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %Qubit* %.q) +{ + %q = bitcast %Qubit* %.q to %class.QUBIT* + %r = call %class.RESULT* @quantum__qis__single_qubit_measure(i32 %id, i32 %duration, %class.QUBIT* %q) + %.r = bitcast %class.RESULT* %r to %Result* + ret %Result* %.r +} + +define %Result* @__quantum__qis__joint_measure(i32 %id, i32 %duration, %Array* %.qs) +{ + %qs = bitcast %Array* %.qs to %struct.QirArray* + %r = call %class.RESULT* @quantum__qis__joint_measure(i32 %id, i32 %duration, %struct.QirArray* %qs) + %.r = bitcast %class.RESULT* %r to %Result* + ret %Result* %.r +} \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer-qis.cpp b/src/QirRuntime/lib/Tracer/tracer-qis.cpp new file mode 
100644 index 00000000000..ed7a5eeb370 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/tracer-qis.cpp @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include + +#include "CoreTypes.hpp" +#include "QirTypes.hpp" +#include "tracer.hpp" + +namespace Microsoft +{ +namespace Quantum +{ + extern thread_local std::shared_ptr tracer; +} +} // namespace Microsoft + +using namespace Microsoft::Quantum; +extern "C" +{ + void quantum__qis__on_operation_start(int64_t id) // NOLINT + { + } + void quantum__qis__on_operation_end(int64_t id) // NOLINT + { + } + + void quantum__qis__swap(Qubit q1, Qubit q2) // NOLINT + { + } + + void quantum__qis__single_qubit_op(int32_t id, int32_t duration, Qubit target) // NOLINT + { + (void)tracer->TraceSingleQubitOp(id, duration, target); + } + void quantum__qis__single_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, Qubit target) // NOLINT + { + (void)tracer->TraceMultiQubitOp(id, duration, ctls->count, reinterpret_cast(ctls->buffer), 1, &target); + } + void quantum__qis__multi_qubit_op(int32_t id, int32_t duration, QirArray* targets) // NOLINT + { + (void)tracer->TraceMultiQubitOp( + id, duration, 0, nullptr, targets->count, reinterpret_cast(targets->buffer)); + } + void quantum__qis__multi_qubit_op_ctl(int32_t id, int32_t duration, QirArray* ctls, QirArray* targets) // NOLINT + { + (void)tracer->TraceMultiQubitOp( + id, duration, ctls->count, reinterpret_cast(ctls->buffer), targets->count, + reinterpret_cast(targets->buffer)); + } + + void quantum__qis__inject_barrier(int32_t id, int32_t duration) // NOLINT + { + (void)tracer->InjectGlobalBarrier(id, duration); + } + + RESULT* quantum__qis__single_qubit_measure(int32_t id, int32_t duration, QUBIT* q) // NOLINT + { + return tracer->TraceSingleQubitMeasurement(id, duration, q); + } + + RESULT* quantum__qis__joint_measure(int32_t id, int32_t duration, QirArray* qs) // NOLINT + { + return tracer->TraceMultiQubitMeasurement(id, duration, 
qs->count, reinterpret_cast(qs->buffer)); + } +} \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.cpp b/src/QirRuntime/lib/Tracer/tracer.cpp new file mode 100644 index 00000000000..c9e906f4a29 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/tracer.cpp @@ -0,0 +1,309 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include +#include +#include + +#include "tracer.hpp" + +using namespace std; + +namespace Microsoft +{ +namespace Quantum +{ + thread_local std::shared_ptr tracer = nullptr; + std::shared_ptr CreateTracer(int preferredLayerDuration) + { + tracer = std::make_shared(preferredLayerDuration); + return tracer; + } + std::shared_ptr CreateTracer(int preferredLayerDuration, const std::unordered_map& opNames) + { + tracer = std::make_shared(preferredLayerDuration, opNames); + return tracer; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer's ISimulator implementation + //------------------------------------------------------------------------------------------------------------------ + Qubit CTracer::AllocateQubit() + { + size_t qubit = qubits.size(); + qubits.emplace_back(QubitState{}); + return reinterpret_cast(qubit); + } + + void CTracer::ReleaseQubit(Qubit /*qubit*/) + { + // nothing for now + } + + // TODO: what would be meaningful information we could printout for a qubit? 
+ std::string CTracer::QubitToString(Qubit q) + { + size_t qubitIndex = reinterpret_cast(q); + const QubitState& qstate = this->UseQubit(q); + + stringstream str(std::to_string(qubitIndex)); + str << " last used in layer " << qstate.layer << "(pending zero ops: " << qstate.pendingZeroDurationOps.size() << ")"; + return str.str(); + } + + void CTracer::ReleaseResult(Result /*result*/) + { + // nothing to do, we don't allocate results on measurement [yet] + } + + // Although the tracer should never compare results or get their values, it still has to implement UseZero and + // UseOne methods as they are invoked by the QIR initialization. + Result CTracer::UseZero() + { + return reinterpret_cast(INVALID); + } + + Result CTracer::UseOne() + { + return reinterpret_cast(INVALID); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::CreateNewLayer + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::CreateNewLayer(Duration minRequiredDuration) + { + // Create a new layer for the operation. 
+ Time layerStartTime = 0; + if (!this->metricsByLayer.empty()) + { + const Layer& lastLayer = this->metricsByLayer.back(); + layerStartTime = lastLayer.startTime + lastLayer.duration; + } + this->metricsByLayer.emplace_back( + Layer {layerStartTime, max(this->preferredLayerDuration, minRequiredDuration)}); + + return this->metricsByLayer.size() - 1; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::FindLayerToInsertOperationInto + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const + { + const QubitState& qstate = this->UseQubit(q); + + LayerId layerToInsertInto = INVALID; + + const LayerId firstLayerAfterBarrier = + this->globalBarrier == INVALID + ? this->metricsByLayer.empty() ? INVALID : 0 + : this->globalBarrier + 1 == this->metricsByLayer.size() ? 
INVALID : this->globalBarrier + 1; + + LayerId candidate = max(qstate.layer, firstLayerAfterBarrier); + + if (candidate != INVALID) + { + // Find the earliest layer that the operation fits in by duration + const Layer& candidateLayer = this->metricsByLayer[candidate]; + const Time lastUsedTime = max(qstate.lastUsedTime, candidateLayer.startTime); + if (lastUsedTime + opDuration <= candidateLayer.startTime + candidateLayer.duration) + { + layerToInsertInto = candidate; + } + else + { + for (candidate += 1; candidate < this->metricsByLayer.size(); ++candidate) + { + if (opDuration <= this->metricsByLayer[candidate].duration) + { + layerToInsertInto = candidate; + break; + } + } + } + } + else if (opDuration <= this->preferredLayerDuration) + { + layerToInsertInto = firstLayerAfterBarrier; + } + + return layerToInsertInto; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::AddOperationToLayer + //------------------------------------------------------------------------------------------------------------------ + void CTracer::AddOperationToLayer(OpId id, LayerId layer) + { + assert(layer < this->metricsByLayer.size()); + this->metricsByLayer[layer].operations[id] += 1; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::UpdateQubitState + //------------------------------------------------------------------------------------------------------------------ + void CTracer::UpdateQubitState(Qubit q, LayerId layer, Duration opDuration) + { + QubitState& qstate = this->UseQubit(q); + for (OpId idPending : qstate.pendingZeroDurationOps) + { + this->AddOperationToLayer(idPending, layer); + } + + // Update the qubit state. 
+ qstate.layer = layer; + const Time layerStart = this->metricsByLayer[layer].startTime; + qstate.lastUsedTime = max(layerStart, qstate.lastUsedTime) + opDuration; + qstate.pendingZeroDurationOps.clear(); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceSingleQubitOp + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::TraceSingleQubitOp(OpId id, Duration opDuration, Qubit target) + { + this->seenOps.insert(id); + + QubitState& qstate = this->UseQubit(target); + if (opDuration == 0 && + (qstate.layer == INVALID || (this->globalBarrier != INVALID && qstate.layer < this->globalBarrier))) + { + qstate.pendingZeroDurationOps.push_back(id); + return INVALID; + } + + // Figure out the layer this operation should go into. + LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(target, opDuration); + if (layerToInsertInto == INVALID) + { + layerToInsertInto = this->CreateNewLayer(opDuration); + } + + // Add the operation and the pending zero-duration ones into the layer. + this->AddOperationToLayer(id, layerToInsertInto); + this->UpdateQubitState(target, layerToInsertInto, opDuration); + + return layerToInsertInto; + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::TraceControlledSingleQubitOp + //------------------------------------------------------------------------------------------------------------------ + LayerId CTracer::TraceMultiQubitOp( + OpId id, + Duration opDuration, + long nFirstGroup, + Qubit* firstGroup, + long nSecondGroup, + Qubit* secondGroup) + { + assert(nFirstGroup >= 0); + assert(nSecondGroup > 0); + + // Special-casing operations of duration zero enables potentially better reuse of qubits, when we'll start + // optimizing for circuit width. 
However, tracking _the same_ pending operation across _multiple_ qubits is + // tricky and not worth the effort, so we only do single qubit case. + if (opDuration == 0 && nFirstGroup == 0 && nSecondGroup == 1) + { + return this->TraceSingleQubitOp(id, opDuration, secondGroup[0]); + } + + this->seenOps.insert(id); + + // Figure out the layer this operation should go into. + LayerId layerToInsertInto = this->FindLayerToInsertOperationInto(secondGroup[0], opDuration); + for (long i = 1; i < nSecondGroup && layerToInsertInto != INVALID; i++) + { + layerToInsertInto = + max(layerToInsertInto, this->FindLayerToInsertOperationInto(secondGroup[i], opDuration)); + } + for (long i = 0; i < nFirstGroup && layerToInsertInto != INVALID; i++) + { + layerToInsertInto = max(layerToInsertInto, this->FindLayerToInsertOperationInto(firstGroup[i], opDuration)); + } + if (layerToInsertInto == INVALID) + { + layerToInsertInto = this->CreateNewLayer(opDuration); + } + + // Add the operation into the layer. + this->AddOperationToLayer(id, layerToInsertInto); + + // Update the state of the involved qubits. 
+ for (long i = 0; i < nFirstGroup; i++) + { + this->UpdateQubitState(firstGroup[i], layerToInsertInto, opDuration); + } + for (long i = 0; i < nSecondGroup; i++) + { + this->UpdateQubitState(secondGroup[i], layerToInsertInto, opDuration); + } + + return layerToInsertInto; + } + + LayerId CTracer::InjectGlobalBarrier(OpId id, Duration duration) + { + LayerId layer = this->CreateNewLayer(duration); + this->metricsByLayer[layer].barrierId = id; + this->globalBarrier = layer; + return layer; + } + + Result CTracer::TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target) + { + LayerId layerId = this->TraceSingleQubitOp(id, duration, target); + return reinterpret_cast(layerId); + } + + Result CTracer::TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets) + { + LayerId layerId = this->TraceMultiQubitOp(id, duration, 0, nullptr, nTargets, targets); + return reinterpret_cast(layerId); + } + + //------------------------------------------------------------------------------------------------------------------ + // CTracer::PrintLayerMetrics + //------------------------------------------------------------------------------------------------------------------ + static std::string GetOperationName(OpId opId, const std::unordered_map& opNames) + { + if (opId < 0) + { + return ""; + } + + auto nameIt = opNames.find(opId); + return nameIt == opNames.end() ? std::to_string(opId) : nameIt->second; + } + void CTracer::PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const + { + // Sort the operations by id so the output is deterministic. + std::set seenOpsOrderedById(this->seenOps.begin(), this->seenOps.end()); + + // header row + out << "layer_id" << separator << "name"; + for (OpId opId : seenOpsOrderedById) + { + out << separator << GetOperationName(opId, this->opNames); + } + out << std::endl; + + // data rows + const std::string zeroString = printZeroMetrics ? 
"0" : ""; + for (const Layer& layer : this->metricsByLayer) + { + out << layer.startTime; + out << separator << GetOperationName(layer.barrierId, this->opNames); + + for (OpId opId : seenOpsOrderedById) + { + auto foundInLayer = layer.operations.find(opId); + out << separator + << ((foundInLayer == layer.operations.end()) ? zeroString : std::to_string(foundInLayer->second)); + } + out << std::endl; + } + } +} // namespace Quantum +} // namespace Microsoft \ No newline at end of file diff --git a/src/QirRuntime/lib/Tracer/tracer.hpp b/src/QirRuntime/lib/Tracer/tracer.hpp new file mode 100644 index 00000000000..c5548e4bd18 --- /dev/null +++ b/src/QirRuntime/lib/Tracer/tracer.hpp @@ -0,0 +1,206 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#pragma once + +#include +#include +#include +#include +#include + +#include "CoreTypes.hpp" +#include "TracerTypes.hpp" +#include "QuantumApi_I.hpp" + +namespace Microsoft +{ +namespace Quantum +{ + /*================================================================================================================== + Layer + ==================================================================================================================*/ + struct Layer + { + // Start time of the layer. + const Time startTime; + + // Width of the layer on the time axis. + const Duration duration; + + // Quantum operations, assigned to this layer. + std::unordered_map operations; + + // Optional id, if the layer represents a global barrier. 
+ OpId barrierId = -1; + + Layer(Time startTime, Duration duration) + : startTime(startTime) + , duration(duration) + { + } + }; + + /*================================================================================================================== + QubitState + ==================================================================================================================*/ + struct QubitState + { + // The last layer this qubit was used in, `INVALID` means the qubit hasn't been used yet in any + // operations of non-zero duration. + LayerId layer = INVALID; + + // `lastUsedTime` stores the end time of the last operation the qubit participated in. It might not match the + // end time of a layer, if the duration of the last operation is less than duration of the layer. Tracking this + // time allows us to possibly fit multiple short operations on the same qubit into a single layer. + Time lastUsedTime = 0; + + std::vector pendingZeroDurationOps; + }; + + /*================================================================================================================== + The tracer implements resource estimation. See readme in this folder for details. + ==================================================================================================================*/ + class CTracer : public ISimulator + { + // For now the tracer assumes no reuse of qubits. + std::vector qubits; + + // The preferred duration of a layer. An operation with longer duration will make the containing layer longer. + const int preferredLayerDuration = 0; + + // The index into the vector is treated as implicit id of the layer. + std::vector metricsByLayer; + + // The last global barrier, injected by the user. No new operations can be added to the barrier or to any of the + // layer that preceded it, even if the new operations involve completely new qubits.
+ LayerId globalBarrier = INVALID; + + // Mapping of operation ids to user-chosen names, for operations that the user didn't name, the output will use + // operation ids. + std::unordered_map opNames; + + // Operations we've seen so far (to be able to trim output to include only those that were encountered). + std::unordered_set seenOps; + + private: + QubitState& UseQubit(Qubit q) + { + size_t qubitIndex = reinterpret_cast(q); + assert(qubitIndex < this->qubits.size()); + return this->qubits[qubitIndex]; + } + const QubitState& UseQubit(Qubit q) const + { + size_t qubitIndex = reinterpret_cast(q); + assert(qubitIndex < this->qubits.size()); + return this->qubits[qubitIndex]; + } + + // If no appropriate layer found, return `INVALID` + LayerId FindLayerToInsertOperationInto(Qubit q, Duration opDuration) const; + + // Returns the index of the created layer. + LayerId CreateNewLayer(Duration minRequiredDuration); + + // Adds operation with given id into the given layer. Assumes that duration constraints have been satisfied.
+ void AddOperationToLayer(OpId id, LayerId layer); + + // Update the qubit state with the new layer information + void UpdateQubitState(Qubit q, LayerId layer, Duration opDuration); + + public: + explicit CTracer(int preferredLayerDuration) + : preferredLayerDuration(preferredLayerDuration) + { + } + + CTracer(int preferredLayerDuration, const std::unordered_map& opNames) + : preferredLayerDuration(preferredLayerDuration) + , opNames(opNames) + { + } + + // ------------------------------------------------------------------------------------------------------------- + // ISimulator interface + // ------------------------------------------------------------------------------------------------------------- + Qubit AllocateQubit() override; + void ReleaseQubit(Qubit qubit) override; + std::string QubitToString(Qubit qubit) override; + void ReleaseResult(Result result) override; + + IQuantumGateSet* AsQuantumGateSet() override + { + throw std::logic_error("Not supported: all intrinsics must be converted to tracing operations"); + } + IDiagnostics* AsDiagnostics() override + { + return nullptr; + } + Result M(Qubit target) override + { + throw std::logic_error("Not supported: all measurements must be converted to tracing operations"); + } + Result Measure(long numBases, PauliId bases[], long numTargets, Qubit targets[]) override + { + throw std::logic_error("Not supported: all measurements must be converted to tracing operations"); + } + bool AreEqualResults(Result r1, Result r2) override + { + throw std::logic_error("Cannot compare results while tracing!"); + } + ResultValue GetResultValue(Result result) override + { + throw std::logic_error("Result values aren't available while tracing!"); + } + Result UseZero() override; + Result UseOne() override; + + // ------------------------------------------------------------------------------------------------------------- + // Instead of implementing IQuantumGateSet, the tracer provides 'tracing-by-id' methods. 
The QIR generation + should translate all intrinsics to invoke these methods. + // The tracer doesn't differentiate between control and target qubits. However, while it could provide a single + // generic tracing method for an array of qubits, that would require the clients to copy control and target + // qubits into the same array. To avoid the copy, the tracer provides a method that takes two groups of qubits, + // where the first one can be empty or can be viewed as the set of controls. + // ------------------------------------------------------------------------------------------------------------- + LayerId TraceSingleQubitOp(OpId id, Duration duration, Qubit target); + LayerId TraceMultiQubitOp( + OpId id, + Duration duration, + long nFirstGroup, + Qubit* firstGroup, + long nSecondGroup, + Qubit* secondGroup); + + Result TraceSingleQubitMeasurement(OpId id, Duration duration, Qubit target); + Result TraceMultiQubitMeasurement(OpId id, Duration duration, long nTargets, Qubit* targets); + LayerId GetLayerIdOfSourceMeasurement(Result r) const + { + return reinterpret_cast(r); + } + + // ------------------------------------------------------------------------------------------------------------- + // Backing of the rest of the bridge methods. + // ------------------------------------------------------------------------------------------------------------- + LayerId InjectGlobalBarrier(OpId id, Duration duration); + + // ------------------------------------------------------------------------------------------------------------- + // Configuring the tracer and getting data back from it.
+ // ------------------------------------------------------------------------------------------------------------- + // Temporary method for initial testing + // TODO: replace with a safer accessor + const std::vector& UseLayers() + { + return this->metricsByLayer; + } + + void PrintLayerMetrics(std::ostream& out, const std::string& separator, bool printZeroMetrics) const; + }; + + std::shared_ptr CreateTracer(int preferredLayerDuration); + std::shared_ptr CreateTracer( + int preferredLayerDuration, + const std::unordered_map& opNames); + +} // namespace Quantum +} // namespace Microsoft \ No newline at end of file diff --git a/src/QirRuntime/public/CoreTypes.hpp b/src/QirRuntime/public/CoreTypes.hpp index 99319f8beaf..28abfb2a9eb 100644 --- a/src/QirRuntime/public/CoreTypes.hpp +++ b/src/QirRuntime/public/CoreTypes.hpp @@ -5,7 +5,6 @@ // The core types will be exposed in the C-interfaces for interop, thus no // namespaces or scoped enums can be used to define them. - /*============================================================================== Qubit & Result @@ -36,5 +35,3 @@ enum PauliId : int32_t PauliId_Z = 2, PauliId_Y = 3, }; - - diff --git a/src/QirRuntime/public/TracerTypes.hpp b/src/QirRuntime/public/TracerTypes.hpp new file mode 100644 index 00000000000..9b7d242f9f7 --- /dev/null +++ b/src/QirRuntime/public/TracerTypes.hpp @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+#pragma once + +#include +namespace Microsoft +{ +namespace Quantum +{ + using OpId = int; + using Time = int; + using Duration = int; + using LayerId = size_t; + + constexpr LayerId INVALID = std::numeric_limits::max(); +} +} \ No newline at end of file diff --git a/src/QirRuntime/test.py b/src/QirRuntime/test.py index 7c3d3450806..869eea365cb 100644 --- a/src/QirRuntime/test.py +++ b/src/QirRuntime/test.py @@ -20,80 +20,83 @@ def log(message): print(current_time + ": " + message) # ============================================================================= -root_dir = os.path.dirname(os.path.abspath(__file__)) +if __name__ == '__main__': + # this script is executed as script + root_dir = os.path.dirname(os.path.abspath(__file__)) -# parameters -flavor = "Debug" -nobuild = False -noqirgen = False -for arg in sys.argv: - arg = arg.lower() - if arg == "test.py": - continue - elif arg == "debug": - flavor = "Debug" - elif arg == "release": - flavor = "Release" - elif arg == "nobuild": - nobuild = True - noqirgen = True - elif arg == "noqirgen": - noqirgen = True - else: - log("unrecognized argument: " + arg) - sys.exit() + # parameters + flavor = "Debug" + nobuild = False + noqirgen = False + for arg in sys.argv: + arg = arg.lower() + if arg == "test.py": + continue + elif arg == "debug": + flavor = "Debug" + elif arg == "release": + flavor = "Release" + elif arg == "nobuild": + nobuild = True + noqirgen = True + elif arg == "noqirgen": + noqirgen = True + else: + log("unrecognized argument: " + arg) + sys.exit() -if not noqirgen: - if generateqir.do_generate_all(root_dir) != 0: - log("build failed to generate QIR => won't execute the tests") - log("to execute the tests from the last successful build run `test.py nobuild`") - sys.exit() + if not noqirgen: + if generateqir.do_generate_all(root_dir) != 0: + log("build failed to generate QIR => won't execute the tests") + log("to execute the tests from the last successful build run `test.py nobuild`") + sys.exit() 
-if not nobuild: - result = build.do_build(root_dir, True, True, flavor) # should_make, should_build - if result.returncode != 0: - log("build failed with exit code {0} => won't execute the tests".format(result.returncode)) - log("to execute the tests from the last successful build run `test.py nobuild`") - sys.exit() + if not nobuild: + result = build.do_build(root_dir, True, True, flavor) # should_make, should_build + if result.returncode != 0: + log("build failed with exit code {0} => won't execute the tests".format(result.returncode)) + log("to execute the tests from the last successful build run `test.py nobuild`") + sys.exit() -install_dir = os.path.join(root_dir, "build", platform.system(), flavor, "bin") -if not os.path.isdir(install_dir): - log("please build first: 'build.py [debug|release] [ir]'") - sys.exit() + install_dir = os.path.join(root_dir, "build", platform.system(), flavor, "bin") + if not os.path.isdir(install_dir): + log("please build first: 'build.py [debug|release] [ir]'") + sys.exit() -print("\n") + print("\n") -# Configure DLL lookup locations to include full state simulator and qdk -exe_ext = "" -fullstate_sim_dir = os.path.join(root_dir, "..", "Simulation", "Native", "build", flavor) -if platform.system() == "Windows": - exe_ext = ".exe" - os.environ['PATH'] = os.environ['PATH'] + ";" + fullstate_sim_dir + ";" + install_dir -else: - # add the folder to the list of locations to load libraries from - old = os.environ.get("LD_LIBRARY_PATH") - if old: - os.environ["LD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + # Configure DLL lookup locations to include full state simulator and qdk + exe_ext = "" + fullstate_sim_dir = os.path.join(root_dir, "..", "Simulation", "Native", "build", flavor) + if platform.system() == "Windows": + exe_ext = ".exe" + os.environ['PATH'] = os.environ['PATH'] + ";" + fullstate_sim_dir + ";" + install_dir else: - os.environ["LD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir + # add the 
folder to the list of locations to load libraries from + old = os.environ.get("LD_LIBRARY_PATH") + if old: + os.environ["LD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + else: + os.environ["LD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir - old = os.environ.get("DYLD_LIBRARY_PATH") - if old: - os.environ["DYLD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir - else: - os.environ["DYLD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir + old = os.environ.get("DYLD_LIBRARY_PATH") + if old: + os.environ["DYLD_LIBRARY_PATH"] = old + ":" + fullstate_sim_dir + ":" + install_dir + else: + os.environ["DYLD_LIBRARY_PATH"] = fullstate_sim_dir + ":" + install_dir -log("========= Running native tests =========") -test_binaries = [ - "fullstate-simulator-tests", - "qir-runtime-unittests", - "qir-static-tests", - "qir-dynamic-tests" -] + log("========= Running native tests =========") + test_binaries = [ + "fullstate-simulator-tests", + "qir-runtime-unittests", + "qir-static-tests", + "qir-dynamic-tests", + "qir-tracer-tests" + ] -for name in test_binaries: - test_binary = os.path.join(install_dir, name + exe_ext) - log(test_binary) - subprocess.run(test_binary + " ~[skip]", shell = True) + for name in test_binaries: + test_binary = os.path.join(install_dir, name + exe_ext) + log(test_binary) + subprocess.run(test_binary + " ~[skip]", shell = True) -print("\n") \ No newline at end of file + print("\n") \ No newline at end of file diff --git a/src/QirRuntime/test/CMakeLists.txt b/src/QirRuntime/test/CMakeLists.txt index 8d2fac8c254..e5541f78589 100644 --- a/src/QirRuntime/test/CMakeLists.txt +++ b/src/QirRuntime/test/CMakeLists.txt @@ -1,4 +1,5 @@ add_subdirectory(FullstateSimulator) add_subdirectory(QIR-dynamic) add_subdirectory(QIR-static) +add_subdirectory(QIR-tracer) add_subdirectory(unittests) diff --git a/src/QirRuntime/test/QIR-tracer/CMakeLists.txt b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt new file mode 100644 
index 00000000000..e48b4c619f7 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/CMakeLists.txt @@ -0,0 +1,28 @@ + +compile_from_qir(tracer-qir tracer_qir) + +#============================================================================== +# The executable target for QIR tests triggers the custom actions to compile ll files +# +add_executable(qir-tracer-tests + qir-tracer-driver.cpp + tracer-config.cpp +) + +target_link_libraries(qir-tracer-tests PUBLIC + ${QIR_UTILITY_LIB} # set by compile_from_qir + ${QIR_BRIDGE_UTILITY_LIB} + ${QIR_BRIDGE_TRACER_UTILITY_LIB} + tracer + qir-rt-support +) + +target_include_directories(qir-tracer-tests PUBLIC + "${test_includes}" + "${public_includes}" + "${PROJECT_SOURCE_DIR}/lib/Tracer" +) +add_dependencies(qir-tracer-tests tracer_qir) + +install(TARGETS qir-tracer-tests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") +add_unit_test(qir-tracer-tests) diff --git a/src/QirRuntime/test/QIR-tracer/generate.py b/src/QirRuntime/test/QIR-tracer/generate.py new file mode 100644 index 00000000000..206afb6b6d6 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/generate.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +import sys, os, platform, subprocess, datetime, shutil + +# ============================================================================= +# Generates QIR files for all *.qs files in this folder +# Accepts arguments: +# path to qsc.exe (absolute or rely on Path env) +# +# For example: "generate.py qsc.exe" or "generate.py c:\qsharp-compiler\qsc.exe" +# ============================================================================= + +# ============================================================================= +def log(message): + now = datetime.datetime.now() + current_time = now.strftime("%H:%M:%S") + print(current_time + ": " + message) +# ============================================================================= + +if __name__ == '__main__': + # this script is executed as script + root_dir = os.path.dirname(os.path.abspath(__file__)) + + # parameters + qsc = sys.argv[1] # argv[0] is the name of this script file + + # find all qs files in this folder + files_to_process = "" + output_file = "tracer-qir" + for file in os.listdir(root_dir): + (file_name, ext) = os.path.splitext(file) + if ext == ".qs": + files_to_process = files_to_process + " " + file + + # Compile as a lib so all functions are retained and don't have to workaround the current limitations of + # @EntryPoint attribute. + command = (qsc + " build --qir s --input " + files_to_process + " --proj " + output_file) + log("Executing: " + command) + subprocess.run(command, shell = True) + diff --git a/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp new file mode 100644 index 00000000000..ba2fd6e361a --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/qir-tracer-driver.cpp @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#include +#include + +#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file +#include "catch.hpp" + +#include "QirContext.hpp" +#include "tracer-config.hpp" +#include "tracer.hpp" + +using namespace std; +using namespace Microsoft::Quantum; + +namespace TracerUser +{ + +TEST_CASE("Invoke each intrinsic from Q# core once", "[qir-tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); + QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); + + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body()); + const vector& layers = tr->UseLayers(); + + std::stringstream out; + tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); + INFO(out.str()); + + // TestCoreIntrinsics happens to produce 24 layers right now and we are not checking whether that's expected -- as + // testing of layering logic is better done by unit tests. + CHECK(layers.size() == 24); +} + +TEST_CASE("Measurements can be counted but cannot be compared", "[qir-tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/, g_operationNames); + QirContextScope qirctx(tr.get(), false /*trackAllocatedObjects*/); + + REQUIRE_NOTHROW(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(false /*compare*/)); + CHECK(tr->UseLayers().size() == 1); + + REQUIRE_THROWS(Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(true /*compare*/)); +} +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.cpp b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp new file mode 100644 index 00000000000..eab02e878af --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.cpp @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// TODO: ideally, this file should be generated by the Q# compiler alongside the qir, using the mappings specified in +// target.qs. 
+ +#include + +#include "QuantumApi_I.hpp" +#include "tracer-config.hpp" + +namespace TracerUser +{ +const std::unordered_map g_operationNames = { + {0, "X"}, {1, "CX"}, {2, "MCX"}, {3, "Y"}, {4, "CY"}, {5, "MCY"} /*etc.*/}; +} diff --git a/src/QirRuntime/test/QIR-tracer/tracer-config.hpp b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp new file mode 100644 index 00000000000..8163231286d --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-config.hpp @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// TODO: ideally, this file should be generated by the Q# compiler alongside the qir + +#pragma once + +#include +#include + +#include "TracerTypes.hpp" + +namespace TracerUser +{ +extern const std::unordered_map g_operationNames; +} // namespace TracerUser + +// Available function in generated QIR +extern "C" void Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body(); // NOLINT +extern "C" void Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(bool compare); // NOLINT diff --git a/src/QirRuntime/test/QIR-tracer/tracer-core.qs b/src/QirRuntime/test/QIR-tracer/tracer-core.qs new file mode 100644 index 00000000000..84e57ae32e3 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-core.qs @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +namespace Microsoft.Quantum.Core { + + @Attribute() + newtype Attribute = Unit; + + @Attribute() + newtype Inline = Unit; + + @Attribute() + newtype EntryPoint = Unit; + + function Length<'T> (array : 'T[]) : Int { body intrinsic; } + + function RangeStart (range : Range) : Int { body intrinsic; } + + function RangeStep (range : Range) : Int { body intrinsic; } + + function RangeEnd (range : Range) : Int { body intrinsic; } + + function RangeReverse (range : Range) : Range { body intrinsic; } +} + +namespace Microsoft.Quantum.Targeting { + + @Attribute() + newtype TargetInstruction = String; +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs new file mode 100644 index 00000000000..1e23bf5f613 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-intrinsics.qs @@ -0,0 +1,63 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.Tracer { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Tracer; + + operation TestCoreIntrinsics() : Unit { + use qs = Qubit[3]; + + X(qs[0]); + Y(qs[0]); + Z(qs[1]); + H(qs[1]); + CNOT(qs[1], qs[2]); + Rx(0.3, qs[0]); + Ry(0.4, qs[1]); + Rz(0.5, qs[2]); + //SWAP(qs[0], qs[2]); + S(qs[1]); + T(qs[2]); + + Barrier(42, 0); + + Adjoint X(qs[0]); + Adjoint Y(qs[0]); + Adjoint Z(qs[1]); + Adjoint H(qs[1]); + Adjoint CNOT(qs[1], qs[2]); + Adjoint Rx(0.3, qs[0]); + Adjoint Ry(0.4, qs[1]); + Adjoint Rz(0.5, qs[2]); + //Adjoint SWAP(qs[0], qs[2]); + Adjoint S(qs[1]); + Adjoint T(qs[2]); + + use c = Qubit() { + Controlled X([c], (qs[0])); + Controlled Y([c], (qs[0])); + Controlled Z([c], (qs[1])); + Controlled H([c], (qs[1])); + Controlled Rx([c], (0.3, qs[0])); + Controlled Ry([c], (0.4, qs[1])); + Controlled Rz([c], (0.5, qs[2])); + //Controlled SWAP([c], (qs[0], qs[2])); + Controlled S([c], (qs[1])); + Controlled T([c], (qs[2])); + } + + use cc = Qubit[2] { + 
Controlled X(cc, (qs[0])); + Controlled Y(cc, (qs[0])); + Controlled Z(cc, (qs[1])); + Controlled H(cc, (qs[1])); + Controlled Rx(cc, (0.3, qs[0])); + Controlled Ry(cc, (0.4, qs[1])); + Controlled Rz(cc, (0.5, qs[2])); + //Controlled SWAP(cc, (qs[0], qs[2])); + Controlled S(cc, (qs[1])); + Controlled T(cc, (qs[2])); + } + } +} diff --git a/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs new file mode 100644 index 00000000000..7c4aab5eea1 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-measurements.qs @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Testing.Tracer { + open Microsoft.Quantum.Intrinsic; + + operation Fixup(qs : Qubit[]) : Unit { + for i in 0..Length(qs)-1 { + X(qs[i]); + } + } + + operation TestMeasurements(compare : Bool) : Unit { + use qs = Qubit[3]; + let r0 = M(qs[0]); + let qs12 = [qs[1], qs[2]]; + let r12 = Measure([PauliY, PauliX], qs12); + + if compare { + if r0 == Zero { + X(qs[1]); + } + + //ApplyIfOne(r12, (Fixup, qs12)); + } + } +} \ No newline at end of file diff --git a/src/QirRuntime/test/QIR-tracer/tracer-qir.ll b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll new file mode 100644 index 00000000000..73d9cf34372 --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-qir.ll @@ -0,0 +1,1462 @@ + +%Result = type opaque +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Qubit = type opaque +%Array = type opaque +%String = type opaque + +@ResultZero = external global %Result* +@ResultOne = external global %Result* +@PauliI = constant i2 0 +@PauliX = constant i2 1 +@PauliY = constant i2 -1 +@PauliZ = constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } + +define %Tuple* @Microsoft__Quantum__Core__Attribute__body() { +entry: + ret %Tuple* null +} + +define %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { +entry: + ret %Tuple* null +} + +define %Tuple* 
@Microsoft__Quantum__Core__Inline__body() { +entry: + ret %Tuple* null +} + +define void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i64) + +declare void @__quantum__qis__single_qubit_op_ctl(i64, i64, %Array*, %Qubit*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i64) + +define void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +else__1: ; 
preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +declare void @__quantum__qis__single_qubit_op(i64, i64, %Qubit*) + +define void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +declare %Result* @__quantum__qis__single_qubit_measure(i64, i64, %Qubit*) + +define %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %paulis, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = load %Result*, 
%Result** @ResultOne + %res = alloca %Result* + store %Result* %0, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %3 = icmp sle i64 %i, %2 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %5 = bitcast i8* %4 to i2* + %6 = load i2, i2* %5 + %7 = load i2, i2* @PauliY + %8 = icmp eq i2 %6, %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %10 = bitcast i8* %9 to i2* + %11 = load i2, i2* %10 + %12 = load i2, i2* @PauliI + %13 = icmp eq i2 %11, %12 + %14 = or i1 %8, %13 + br i1 %14, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %15 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %16 = load i1, i1* %haveY + br i1 %16, label %then0__2, label %test1__1 + +then0__2: ; preds = %exit__1 + %17 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 1) + store %Result* %17, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %17, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i64 -1) + br label %continue__2 + +test1__1: ; preds = %exit__1 + %18 = icmp sgt i64 %1, 2 + br i1 %18, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %19 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qubits) + call void 
@__quantum__rt__result_update_reference_count(%Result* %19, i64 1) + %20 = load %Result*, %Result** %res + store %Result* %19, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %19, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %20, i64 -1) + br label %continue__2 + +test2__1: ; preds = %test1__1 + %21 = icmp eq i64 %1, 1 + br i1 %21, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = load i2, i2* %23 + %25 = load i2, i2* @PauliX + %26 = icmp eq i2 %24, %25 + br i1 %26, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %28 = bitcast i8* %27 to %Qubit** + %qb = load %Qubit*, %Qubit** %28 + %29 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 1) + %30 = load %Result*, %Result** %res + store %Result* %29, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %29, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %30, i64 -1) + br label %continue__3 + +else__1: ; preds = %then2__1 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %32 = bitcast i8* %31 to %Qubit** + %qb__1 = load %Qubit*, %Qubit** %32 + %33 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__1) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + %34 = load %Result*, %Result** %res + store %Result* %33, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label 
%continue__2 + +test3__1: ; preds = %test2__1 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = load i2, i2* %36 + %38 = load i2, i2* @PauliX + %39 = icmp eq i2 %37, %38 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %41 = bitcast i8* %40 to i2* + %42 = load i2, i2* %41 + %43 = load i2, i2* @PauliX + %44 = icmp eq i2 %42, %43 + %45 = and i1 %39, %44 + br i1 %45, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + %46 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 1) + %47 = load %Result*, %Result** %res + store %Result* %46, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %46, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) + br label %continue__2 + +test4__1: ; preds = %test3__1 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = load i2, i2* %49 + %51 = load i2, i2* @PauliX + %52 = icmp eq i2 %50, %51 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %54 = bitcast i8* %53 to i2* + %55 = load i2, i2* %54 + %56 = load i2, i2* @PauliZ + %57 = icmp eq i2 %55, %56 + %58 = and i1 %52, %57 + br i1 %58, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + %59 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 1) + %60 = load %Result*, %Result** %res + store %Result* %59, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %59, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) + br label %continue__2 + +test5__1: ; preds = %test4__1 + %61 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = load i2, i2* %62 + %64 = load i2, i2* @PauliZ + %65 = icmp eq i2 %63, %64 + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %67 = bitcast i8* %66 to i2* + %68 = load i2, i2* %67 + %69 = load i2, i2* @PauliX + %70 = icmp eq i2 %68, %69 + %71 = and i1 %65, %70 + br i1 %71, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + %72 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 1) + %73 = load %Result*, %Result** %res + store %Result* %72, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %72, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) + br label %continue__2 + +test6__1: ; preds = %test5__1 + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %75 = bitcast i8* %74 to i2* + %76 = load i2, i2* %75 + %77 = load i2, i2* @PauliZ + %78 = icmp eq i2 %76, %77 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %80 = bitcast i8* %79 to i2* + %81 = load i2, i2* %80 + %82 = load i2, i2* @PauliZ + %83 = icmp eq i2 %81, %82 + %84 = and i1 %78, %83 + br i1 %84, label %then6__1, label %continue__2 + +then6__1: ; preds = %test6__1 + %85 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qubits) + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 1) + %86 = load %Result*, %Result** %res + store %Result* %85, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %85, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + br label %continue__2 + +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %87 = load %Result*, 
%Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %87 +} + +declare void @__quantum__rt__result_update_reference_count(%Result*, i64) + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare %Result* @__quantum__qis__joint_measure(i64, i64, %Array*) + +define %Result* @Microsoft__Quantum__Intrinsic__Mx__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mxx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 105, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mxz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 103, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mz__body(%Qubit* %qb) { +entry: + %0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mzx__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 = call %Result* @__quantum__qis__joint_measure(i64 104, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define %Result* @Microsoft__Quantum__Intrinsic__Mzz__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 1) + %0 
= call %Result* @__quantum__qis__joint_measure(i64 102, i64 1, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i64 -1) + ret %Result* %0 +} + +define void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qb) { +entry: 
+ call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 
0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %1 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 0 + %theta = load double, double* %1 + %2 = getelementptr { double, %Qubit* }, { double, %Qubit* }* %0, i64 0, i32 1 + %qb = load %Qubit*, %Qubit** %2 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 17, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 18, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Sz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 13, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__ctl(%Array* %ctls, %Qubit* %qb) { 
+entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tx__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 14, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Tz__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls, %Qubit* %qb) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + ret void +} + +define void 
@Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) 
+ %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qb) { +entry: + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + 
+else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %ctls, %Qubit* %qb) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 1 + br i1 %1, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls, %Qubit* %qb) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Testing__Tracer__Fixup__body(%Array* %qs) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qs) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] + %2 = icmp sle i64 %i, %1 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 %i) + %4 = bitcast i8* %3 to %Qubit** + %qb = load %Qubit*, %Qubit** %4 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %5 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + ret void +} + +define void @Microsoft__Quantum__Testing__Tracer__TestCoreIntrinsics__body() { +entry: 
+ %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qb__1 = load %Qubit*, %Qubit** %3 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %5 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__2) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %7 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %15 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__4) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %17 = bitcast i8* %16 to %Qubit** + %qb__5 = load %Qubit*, %Qubit** %17 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__5) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %19 = bitcast i8* %18 to %Qubit** + %qb__6 = load %Qubit*, 
%Qubit** %19 + call void @__quantum__qis__single_qubit_op(i64 23, i64 1, %Qubit* %qb__6) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %qb__7 = load %Qubit*, %Qubit** %21 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__7) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %23 = bitcast i8* %22 to %Qubit** + %qb__9 = load %Qubit*, %Qubit** %23 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__9) + call void @__quantum__qis__inject_barrier(i64 42, i64 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %25 = bitcast i8* %24 to %Qubit** + %qb__11 = load %Qubit*, %Qubit** %25 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__11) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %qb__12 = load %Qubit*, %Qubit** %27 + call void @__quantum__qis__single_qubit_op(i64 3, i64 1, %Qubit* %qb__12) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %29 = bitcast i8* %28 to %Qubit** + %qb__13 = load %Qubit*, %Qubit** %29 + call void @__quantum__qis__single_qubit_op(i64 6, i64 1, %Qubit* %qb__13) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %31 = bitcast i8* %30 to %Qubit** + %qb__14 = load %Qubit*, %Qubit** %31 + call void @__quantum__qis__single_qubit_op(i64 9, i64 1, %Qubit* %qb__14) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %34, %Qubit* %37) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %39 = bitcast i8* %38 
to %Qubit** + %qb__15 = load %Qubit*, %Qubit** %39 + call void @__quantum__qis__single_qubit_op(i64 19, i64 1, %Qubit* %qb__15) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %41 = bitcast i8* %40 to %Qubit** + %qb__16 = load %Qubit*, %Qubit** %41 + call void @__quantum__qis__single_qubit_op(i64 21, i64 1, %Qubit* %qb__16) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %43 = bitcast i8* %42 to %Qubit** + %qb__17 = load %Qubit*, %Qubit** %43 + call void @__quantum__qis__single_qubit_op(i64 24, i64 1, %Qubit* %qb__17) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %45 = bitcast i8* %44 to %Qubit** + %qb__18 = load %Qubit*, %Qubit** %45 + call void @__quantum__qis__single_qubit_op(i64 15, i64 1, %Qubit* %qb__18) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %47 = bitcast i8* %46 to %Qubit** + %qb__20 = load %Qubit*, %Qubit** %47 + call void @__quantum__qis__single_qubit_op(i64 11, i64 1, %Qubit* %qb__20) + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %ctls = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %49 = bitcast i8* %48 to %Qubit** + store %Qubit* %c, %Qubit** %49 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %51 = bitcast i8* %50 to %Qubit** + %qb__22 = load %Qubit*, %Qubit** %51 + br i1 true, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %ctls, %Qubit* %qb__22) + br label %continue__1 + +else__1: ; preds = %entry + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %ctls, %Qubit* %qb__22) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* 
%ctls, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i64 -1) + %ctls__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__1, i64 0) + %53 = bitcast i8* %52 to %Qubit** + store %Qubit* %c, %Qubit** %53 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %55 = bitcast i8* %54 to %Qubit** + %qb__23 = load %Qubit*, %Qubit** %55 + br i1 true, label %then0__2, label %else__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %ctls__1, %Qubit* %qb__23) + br label %continue__2 + +else__2: ; preds = %continue__1 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %ctls__1, %Qubit* %qb__23) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__1, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__1, i64 -1) + %ctls__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__2, i64 0) + %57 = bitcast i8* %56 to %Qubit** + store %Qubit* %c, %Qubit** %57 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 1) + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %59 = bitcast i8* %58 to %Qubit** + %qb__24 = load %Qubit*, %Qubit** %59 + br i1 true, label %then0__3, label %else__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %ctls__2, %Qubit* %qb__24) + br label %continue__3 + +else__3: ; preds = %continue__2 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %ctls__2, %Qubit* %qb__24) + br label %continue__3 + +continue__3: ; preds = %else__3, %then0__3 + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls__2, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__2, i64 -1) + %ctls__3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__3, i64 0) + %61 = bitcast i8* %60 to %Qubit** + store %Qubit* %c, %Qubit** %61 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 1) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %63 = bitcast i8* %62 to %Qubit** + %qb__25 = load %Qubit*, %Qubit** %63 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %ctls__3, %Qubit* %qb__25) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__3, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__3, i64 -1) + %ctls__4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__4, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %c, %Qubit** %65 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 1) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %67 = bitcast i8* %66 to %Qubit** + %qb__26 = load %Qubit*, %Qubit** %67 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %ctls__4, %Qubit* %qb__26) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__4, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__4, i64 -1) + %ctls__5 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__5, i64 0) + %69 = bitcast i8* %68 to %Qubit** + store %Qubit* %c, %Qubit** %69 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 1) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %71 = bitcast i8* %70 to %Qubit** + 
%qb__27 = load %Qubit*, %Qubit** %71 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %ctls__5, %Qubit* %qb__27) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__5, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__5, i64 -1) + %ctls__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__6, i64 0) + %73 = bitcast i8* %72 to %Qubit** + store %Qubit* %c, %Qubit** %73 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 1) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %75 = bitcast i8* %74 to %Qubit** + %qb__28 = load %Qubit*, %Qubit** %75 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %ctls__6, %Qubit* %qb__28) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__6, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__6, i64 -1) + %ctls__7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__7, i64 0) + %77 = bitcast i8* %76 to %Qubit** + store %Qubit* %c, %Qubit** %77 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %79 = bitcast i8* %78 to %Qubit** + %qb__29 = load %Qubit*, %Qubit** %79 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %ctls__7, %Qubit* %qb__29) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__7, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__7, i64 -1) + %ctls__9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %80 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ctls__9, i64 0) + %81 = bitcast i8* %80 to %Qubit** + store %Qubit* %c, %Qubit** %81 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %83 = bitcast i8* %82 to %Qubit** + %qb__31 = load %Qubit*, %Qubit** %83 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %ctls__9, %Qubit* %qb__31) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls__9, i64 -1) + call void @__quantum__rt__qubit_release(%Qubit* %c) + %cc = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %85 = bitcast i8* %84 to %Qubit** + %qb__33 = load %Qubit*, %Qubit** %85 + %86 = call i64 @__quantum__rt__array_get_size_1d(%Array* %cc) + %87 = icmp eq i64 %86, 1 + br i1 %87, label %then0__4, label %else__4 + +then0__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 1, i64 1, %Array* %cc, %Qubit* %qb__33) + br label %continue__4 + +else__4: ; preds = %continue__3 + call void @__quantum__qis__single_qubit_op_ctl(i64 2, i64 1, %Array* %cc, %Qubit* %qb__33) + br label %continue__4 + +continue__4: ; preds = %else__4, %then0__4 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %89 = bitcast i8* %88 to %Qubit** + %qb__34 = load %Qubit*, %Qubit** %89 + %90 = 
icmp eq i64 %86, 1 + br i1 %90, label %then0__5, label %else__5 + +then0__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 4, i64 1, %Array* %cc, %Qubit* %qb__34) + br label %continue__5 + +else__5: ; preds = %continue__4 + call void @__quantum__qis__single_qubit_op_ctl(i64 5, i64 1, %Array* %cc, %Qubit* %qb__34) + br label %continue__5 + +continue__5: ; preds = %else__5, %then0__5 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %92 = bitcast i8* %91 to %Qubit** + %qb__35 = load %Qubit*, %Qubit** %92 + %93 = icmp eq i64 %86, 1 + br i1 %93, label %then0__6, label %else__6 + +then0__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 7, i64 1, %Array* %cc, %Qubit* %qb__35) + br label %continue__6 + +else__6: ; preds = %continue__5 + call void @__quantum__qis__single_qubit_op_ctl(i64 8, i64 1, %Array* %cc, %Qubit* %qb__35) + br label %continue__6 + +continue__6: ; preds = %else__6, %then0__6 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %95 = bitcast i8* %94 to %Qubit** + %qb__36 = load %Qubit*, %Qubit** %95 + call void @__quantum__qis__single_qubit_op_ctl(i64 10, i64 1, %Array* %cc, %Qubit* %qb__36) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %97 = bitcast i8* %96 to %Qubit** + %qb__37 = load %Qubit*, %Qubit** %97 + call void @__quantum__qis__single_qubit_op_ctl(i64 20, i64 1, %Array* %cc, %Qubit* %qb__37) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) 
+ call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %99 = bitcast i8* %98 to %Qubit** + %qb__38 = load %Qubit*, %Qubit** %99 + call void @__quantum__qis__single_qubit_op_ctl(i64 22, i64 1, %Array* %cc, %Qubit* %qb__38) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %101 = bitcast i8* %100 to %Qubit** + %qb__39 = load %Qubit*, %Qubit** %101 + call void @__quantum__qis__single_qubit_op_ctl(i64 25, i64 1, %Array* %cc, %Qubit* %qb__39) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %103 = bitcast i8* %102 to %Qubit** + %qb__40 = load %Qubit*, %Qubit** %103 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 16, i64 1, %Array* %cc, %Qubit* %qb__40) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %105 = bitcast i8* %104 to %Qubit** + %qb__42 = load %Qubit*, %Qubit** %105 + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 1) + call void @__quantum__qis__single_qubit_op_ctl(i64 12, i64 1, %Array* %cc, %Qubit* %qb__42) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %cc) + call void 
@__quantum__rt__array_update_alias_count(%Array* %cc, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %cc, i64 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__qis__inject_barrier(i64, i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Testing__Tracer__TestMeasurements__body(i1 %compare) { +entry: + %qs = call %Array* @__quantum__rt__qubit_allocate_array(i64 3) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qb = load %Qubit*, %Qubit** %1 + %r0 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb) + %qs12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 2) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10 + store %Qubit* %8, %Qubit** %3 + store %Qubit* %11, %Qubit** %5 + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, 
i64 0) + %13 = bitcast i8* %12 to i2* + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %15 = bitcast i8* %14 to i2* + %16 = load i2, i2* @PauliY + %17 = load i2, i2* @PauliX + store i2 %16, i2* %13 + store i2 %17, i2* %15 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 1) + %18 = load %Result*, %Result** @ResultOne + %res = alloca %Result* + store %Result* %18, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %18, i64 1) + %haveY = alloca i1 + store i1 false, i1* %haveY + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %31, %exiting__1 ] + %19 = icmp sle i64 %i, 1 + br i1 %19, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %21 = bitcast i8* %20 to i2* + %22 = load i2, i2* %21 + %23 = load i2, i2* @PauliY + %24 = icmp eq i2 %22, %23 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %i) + %26 = bitcast i8* %25 to i2* + %27 = load i2, i2* %26 + %28 = load i2, i2* @PauliI + %29 = icmp eq i2 %27, %28 + %30 = or i1 %24, %29 + br i1 %30, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + store i1 true, i1* %haveY + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %31 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %32 = load i1, i1* %haveY + br i1 %32, label %then0__2, label %test1__1 + +then0__2: ; preds = %exit__1 + %33 = call %Result* @__quantum__qis__joint_measure(i64 106, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 1) + store %Result* %33, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %33, i64 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %18, i64 -1) + br label %continue__2 + +test1__1: ; preds = %exit__1 + br i1 false, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %34 = call %Result* @__quantum__qis__joint_measure(i64 107, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 1) + %35 = load %Result*, %Result** %res + store %Result* %34, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %34, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %35, i64 -1) + br label %continue__2 + +test2__1: ; preds = %test1__1 + br i1 false, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %37 = bitcast i8* %36 to i2* + %38 = load i2, i2* %37 + %39 = load i2, i2* @PauliX + %40 = icmp eq i2 %38, %39 + br i1 %40, label %then0__3, label %else__1 + +then0__3: ; preds = %then2__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %42 = bitcast i8* %41 to %Qubit** + %qb__2 = load %Qubit*, %Qubit** %42 + %43 = call %Result* @__quantum__qis__single_qubit_measure(i64 101, i64 1, %Qubit* %qb__2) + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 1) + %44 = load %Result*, %Result** %res + store %Result* %43, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %43, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %44, i64 -1) + br label %continue__3 + +else__1: ; preds = %then2__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs12, i64 0) + %46 = bitcast i8* %45 to %Qubit** + %qb__3 = load %Qubit*, %Qubit** %46 + %47 = call %Result* @__quantum__qis__single_qubit_measure(i64 100, i64 1, %Qubit* %qb__3) + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 1) + %48 = load %Result*, %Result** %res + 
store %Result* %47, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %47, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %48, i64 -1) + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + br label %continue__2 + +test3__1: ; preds = %test2__1 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %50 = bitcast i8* %49 to i2* + %51 = load i2, i2* %50 + %52 = load i2, i2* @PauliX + %53 = icmp eq i2 %51, %52 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = load i2, i2* %55 + %57 = load i2, i2* @PauliX + %58 = icmp eq i2 %56, %57 + %59 = and i1 %53, %58 + br i1 %59, label %then3__1, label %test4__1 + +then3__1: ; preds = %test3__1 + %60 = call %Result* @__quantum__qis__joint_measure(i64 108, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 1) + %61 = load %Result*, %Result** %res + store %Result* %60, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %60, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %61, i64 -1) + br label %continue__2 + +test4__1: ; preds = %test3__1 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %63 = bitcast i8* %62 to i2* + %64 = load i2, i2* %63 + %65 = load i2, i2* @PauliX + %66 = icmp eq i2 %64, %65 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %68 = bitcast i8* %67 to i2* + %69 = load i2, i2* %68 + %70 = load i2, i2* @PauliZ + %71 = icmp eq i2 %69, %70 + %72 = and i1 %66, %71 + br i1 %72, label %then4__1, label %test5__1 + +then4__1: ; preds = %test4__1 + %73 = call %Result* @__quantum__qis__joint_measure(i64 109, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 1) + %74 = load %Result*, %Result** %res + store %Result* %73, %Result** 
%res + call void @__quantum__rt__result_update_reference_count(%Result* %73, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %74, i64 -1) + br label %continue__2 + +test5__1: ; preds = %test4__1 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = load i2, i2* %76 + %78 = load i2, i2* @PauliZ + %79 = icmp eq i2 %77, %78 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %81 = bitcast i8* %80 to i2* + %82 = load i2, i2* %81 + %83 = load i2, i2* @PauliX + %84 = icmp eq i2 %82, %83 + %85 = and i1 %79, %84 + br i1 %85, label %then5__1, label %test6__1 + +then5__1: ; preds = %test5__1 + %86 = call %Result* @__quantum__qis__joint_measure(i64 110, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 1) + %87 = load %Result*, %Result** %res + store %Result* %86, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %86, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %87, i64 -1) + br label %continue__2 + +test6__1: ; preds = %test5__1 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = load i2, i2* %89 + %91 = load i2, i2* @PauliZ + %92 = icmp eq i2 %90, %91 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %94 = bitcast i8* %93 to i2* + %95 = load i2, i2* %94 + %96 = load i2, i2* @PauliZ + %97 = icmp eq i2 %95, %96 + %98 = and i1 %92, %97 + br i1 %98, label %then6__1, label %continue__2 + +then6__1: ; preds = %test6__1 + %99 = call %Result* @__quantum__qis__joint_measure(i64 111, i64 1, %Array* %qs12) + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 1) + %100 = load %Result*, %Result** %res + store %Result* %99, %Result** %res + call void @__quantum__rt__result_update_reference_count(%Result* %99, i64 -1) + call void 
@__quantum__rt__result_update_reference_count(%Result* %100, i64 -1) + br label %continue__2 + +continue__2: ; preds = %then6__1, %test6__1, %then5__1, %then4__1, %then3__1, %continue__3, %then1__1, %then0__2 + %r12 = load %Result*, %Result** %res + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i64 -1) + br i1 %compare, label %then0__4, label %continue__4 + +then0__4: ; preds = %continue__2 + %101 = load %Result*, %Result** @ResultZero + %102 = call i1 @__quantum__rt__result_equal(%Result* %r0, %Result* %101) + br i1 %102, label %then0__5, label %continue__5 + +then0__5: ; preds = %then0__4 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qs, i64 1) + %104 = bitcast i8* %103 to %Qubit** + %qb__4 = load %Qubit*, %Qubit** %104 + call void @__quantum__qis__single_qubit_op(i64 0, i64 1, %Qubit* %qb__4) + br label %continue__5 + +continue__5: ; preds = %then0__5, %then0__4 + br label %continue__4 + +continue__4: ; preds = %continue__5, %continue__2 + call void @__quantum__rt__qubit_release_array(%Array* %qs) + call void @__quantum__rt__array_update_alias_count(%Array* %qs, i64 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r0, i64 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qs12, i64 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %r12, i64 -1) + ret void +} + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) + %1 = 
bitcast %Tuple* %0 to { %String* }* + %2 = getelementptr { %String* }, { %String* }* %1, i64 0, i32 0 + store %String* %__Item1__, %String** %2 + call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i64 1) + ret { %String* }* %1 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__string_update_reference_count(%String*, i64) diff --git a/src/QirRuntime/test/QIR-tracer/tracer-target.qs b/src/QirRuntime/test/QIR-tracer/tracer-target.qs new file mode 100644 index 00000000000..19f7d51abde --- /dev/null +++ b/src/QirRuntime/test/QIR-tracer/tracer-target.qs @@ -0,0 +1,251 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Instructions { + + operation single_qubit_op(op_id: Int, duration: Int, qb : Qubit) : Unit { + body intrinsic; + } + + operation multi_qubit_op(op_id: Int, duration: Int, qbs : Qubit[]) : Unit { + body intrinsic; + } + + operation single_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qb : Qubit) : Unit { + body intrinsic; + } + + operation multi_qubit_op_ctl(op_id: Int, duration: Int, ctl : Qubit[], qbs : Qubit[]) : Unit { + body intrinsic; + } + + operation single_qubit_measure(op_id: Int, duration: Int, qb : Qubit) : Result { + body intrinsic; + } + + operation joint_measure(op_id: Int, duration: Int, qbs : Qubit[]) : Result { + body intrinsic; + } + + // Operations, used in Hadamard frame tracking + @Inline() + operation Tz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(11, 1, qb); } + adjoint (...) { single_qubit_op(11, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(12, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(12, 1, ctls, qb); } + } + + @Inline() + operation Tx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(13, 1, qb); } + adjoint (...) { single_qubit_op(13, 1, qb); } + controlled (ctls, ...) 
{ single_qubit_op_ctl(14, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(14, 1, ctls, qb); } + } + + + @Inline() + operation Sz(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(15, 1, qb); } + adjoint (...) { single_qubit_op(15, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(16, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(16, 1, ctls, qb); } + } + + @Inline() + operation Sx(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { single_qubit_op(17, 1, qb); } + adjoint (...) { single_qubit_op(17, 1, qb); } + controlled (ctls, ...) { single_qubit_op_ctl(18, 1, ctls, qb); } + controlled adjoint (ctls, ...) { single_qubit_op_ctl(18, 1, ctls, qb); } + } + + @Inline() + operation Mz(qb : Qubit) : Result { + body (...) { return single_qubit_measure(100, 1, qb); } + } + + @Inline() + operation Mx(qb : Qubit) : Result { + body (...) { return single_qubit_measure(101, 1, qb); } + } + + @Inline() + operation Mzz(qubits : Qubit[]) : Result { + body (...) { return joint_measure(102, 1, qubits); } + } + + @Inline() + operation Mxz(qubits : Qubit[]) : Result { + body (...) { return joint_measure(103, 1, qubits); } + } + + @Inline() + operation Mzx(qubits : Qubit[]) : Result { + body (...) { return joint_measure(104, 1, qubits); } + } + + @Inline() + operation Mxx(qubits : Qubit[]) : Result { + body (...) { return joint_measure(105, 1, qubits); } + } +} + +namespace Microsoft.Quantum.Tracer { + + @TargetInstruction("inject_global_barrier") + operation Barrier(id : Int, duration : Int) : Unit { + body intrinsic; + } +} + +namespace Microsoft.Quantum.Intrinsic { + + open Microsoft.Quantum.Core; + open Microsoft.Quantum.Instructions as Phys; + open Microsoft.Quantum.Targeting; + + @Inline() + operation X(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(0, 1, qb); } + adjoint self; + controlled (ctls, ...) 
{ + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(1, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(2, 1, ctls, qb); } + } + } + + operation CNOT(control : Qubit, target : Qubit) : Unit + is Adj + Ctl { + body (...) { Controlled X([control], target); } + adjoint self; + controlled (ctls, ...) { Controlled X(ctls + [control], target); } + } + + @Inline() + operation Y(qb : Qubit) : Unit + is Adj + Ctl{ + body (...) { Phys.single_qubit_op(3, 1, qb); } + adjoint self; + controlled (ctls, ...) { + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(4, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(5, 1, ctls, qb); } + } + } + + @Inline() + operation Z(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(6, 1, qb); } + adjoint self; + controlled (ctls, ...) { + if Length(ctls) == 1 { Phys.single_qubit_op_ctl(7, 1, ctls, qb); } + else { Phys.single_qubit_op_ctl(8, 1, ctls, qb); } + } + } + + @Inline() + operation H(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(9, 1, qb); } + adjoint self; + controlled (ctls, ...) { Phys.single_qubit_op_ctl(10, 1, ctls, qb); } + } + + @Inline() + operation T(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.Tz(qb); } + adjoint (...) { Adjoint Phys.Tz(qb); } + controlled (ctls, ...) { Controlled Phys.Tz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Phys.Tz(ctls, qb); } + } + + @Inline() + operation S(qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.Sz(qb); } + adjoint (...) { Adjoint Phys.Sz(qb); } + controlled (ctls, ...) { Controlled Phys.Sz(ctls, qb); } + controlled adjoint (ctls, ...) { Controlled Adjoint Phys.Sz(ctls, qb); } + } + + @Inline() + operation Rx(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(19, 1, qb); } + adjoint (...) { Phys.single_qubit_op(19, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(20, 1, ctls, qb); } + controlled adjoint (ctls, ...) 
{ Phys.single_qubit_op_ctl(20, 1, ctls, qb); } + } + + @Inline() + operation Ry(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(21, 1, qb); } + adjoint (...) { Phys.single_qubit_op(21, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(22, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(22, 1, ctls, qb); } + } + + @Inline() + operation Rz(theta : Double, qb : Qubit) : Unit + is Adj + Ctl { + body (...) { Phys.single_qubit_op(23, 1, qb); } + adjoint (...) { Phys.single_qubit_op(24, 1, qb); } + controlled (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + controlled adjoint (ctls, ...) { Phys.single_qubit_op_ctl(25, 1, ctls, qb); } + } + + @Inline() + operation M(qb : Qubit) : Result { + body (...) { return Phys.Mz(qb); } + } + + @Inline() + operation Measure(paulis : Pauli[], qubits : Qubit[]) : Result { + body (...) { + mutable res = One; + mutable haveY = false; + // Measurements that involve PauliY or PauliI + for i in 0..Length(paulis)-1 { + if paulis[i] == PauliY or paulis[i] == PauliI { + set haveY = true; + } + } + if haveY { set res = Phys.joint_measure(106, 1, qubits); } + + // More than two qubits (but no PauliY or PauliI) + elif Length(paulis) > 2 { set res = Phys.joint_measure(107, 1, qubits); } + + // Single qubit measurement -- differentiate between Mx and Mz + elif Length(paulis) == 1 { + if (paulis[0] == PauliX) { set res = Phys.Mx(qubits[0]); } + else { set res = Phys.Mz(qubits[0]); } + } + + // Specialize for two-qubit measurements: Mxx, Mxz, Mzx, Mzz + elif paulis[0] == PauliX and paulis[1] == PauliX { set res = Phys.Mxx(qubits); } + elif paulis[0] == PauliX and paulis[1] == PauliZ { set res = Phys.Mxz(qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliX { set res = Phys.Mzx(qubits); } + elif paulis[0] == PauliZ and paulis[1] == PauliZ { set res = Phys.Mzz(qubits); } + + //shouldn't get here + return res; + } + } + + // operation SWAP(a : Qubit, b : Qubit) : 
Unit + // is Adj { + // body intrinsic; + // adjoint self; + // } + + +} diff --git a/src/QirRuntime/test/unittests/CMakeLists.txt b/src/QirRuntime/test/unittests/CMakeLists.txt index a6fb9a30662..22b2bc2802f 100644 --- a/src/QirRuntime/test/unittests/CMakeLists.txt +++ b/src/QirRuntime/test/unittests/CMakeLists.txt @@ -5,18 +5,21 @@ add_executable(qir-runtime-unittests driver.cpp QirRuntimeTests.cpp ToffoliTests.cpp + TracerTests.cpp ) target_link_libraries(qir-runtime-unittests PUBLIC qir-rt-support qir-qis-support simulators + tracer ) target_include_directories(qir-runtime-unittests PUBLIC "${test_includes}" ${public_includes} "${PROJECT_SOURCE_DIR}/lib/QIR" + "${PROJECT_SOURCE_DIR}/lib/Tracer" ) install(TARGETS qir-runtime-unittests RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin") add_unit_test(qir-runtime-unittests) diff --git a/src/QirRuntime/test/unittests/TracerTests.cpp b/src/QirRuntime/test/unittests/TracerTests.cpp new file mode 100644 index 00000000000..37f8bc57ad1 --- /dev/null +++ b/src/QirRuntime/test/unittests/TracerTests.cpp @@ -0,0 +1,305 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#include +#include +#include + +#include "catch.hpp" + +#include "CoreTypes.hpp" +#include "tracer.hpp" + +using namespace std; +using namespace Microsoft::Quantum; + +TEST_CASE("Layering distinct single-qubit operations of non-zero durations", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created + CHECK(0 == tr->TraceSingleQubitOp(2, 2, q1)); // add the op into L(0,3) + CHECK(0 == tr->TraceSingleQubitOp(3, 1, q2)); // add the op into L(0,3) + CHECK(1 == tr->TraceSingleQubitOp(4, 3, q2)); // create new layer L(3,3) + CHECK(2 == tr->TraceSingleQubitOp(5, 4, q2)); // long op! 
create new layer L(6,4) + CHECK(1 == tr->TraceSingleQubitOp(6, 2, q1)); // add the op into L(3,3) + CHECK(0 == tr->TraceSingleQubitOp(7, 1, q3)); // add the op into L(0,3) + CHECK(2 == tr->TraceSingleQubitOp(8, 4, q3)); // long op! but fits into existing L(6,4) + CHECK(3 == tr->TraceSingleQubitOp(9, 5, q1)); // long op! add the op into L(10,5) + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 4); + CHECK(layers[0].startTime == 0); + CHECK(layers[0].operations.size() == 4); + CHECK(layers[1].startTime == 3); + CHECK(layers[1].operations.size() == 2); + CHECK(layers[2].startTime == 6); + CHECK(layers[2].operations.size() == 2); + CHECK(layers[3].startTime == 10); + CHECK(layers[3].operations.size() == 1); +} + +TEST_CASE("Layering single-qubit operations of zero duration", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 1, q1)); // L(0,3) should be created + CHECK(0 == tr->TraceSingleQubitOp(2, 0, q1)); // add the op into L(0,3) + CHECK(INVALID == tr->TraceSingleQubitOp(3, 0, q3)); // pending zero op (will remain orphan) + CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q2)); // pending zero op + CHECK(INVALID == tr->TraceSingleQubitOp(5, 0, q2)); // another pending zero op + CHECK(0 == tr->TraceSingleQubitOp(6, 1, q2)); // add the op into L(0,3) together with the pending ones + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 5); +} + +TEST_CASE("Layering distinct controlled single-qubit operations", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + Qubit q5 = tr->AllocateQubit(); + Qubit q6 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceMultiQubitOp(1, 1, 1 
/*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q2 /*second*/)); + CHECK(0 == tr->TraceMultiQubitOp(2, 2, 0 /*nFirst*/, nullptr /*first*/, 1 /*nSecond*/, &q2 /*second*/)); + // q2 now is at the limit of the layer duration + + Qubit qs12[2] = {q1, q2}; + CHECK(1 == tr->TraceMultiQubitOp(3, 1, 0 /*nFirst*/, nullptr /*first*/, 2 /*nSecond*/, qs12 /*second*/)); + CHECK(1 == tr->TraceMultiQubitOp(4, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); + // because of q2, both ops should have been added to a new layer, which now "catches" q1, q2, q3 + + CHECK(0 == tr->TraceMultiQubitOp(5, 0, 1 /*nFirst*/, &q4 /*first*/, 1 /*nSecond*/, &q5 /*second*/)); + CHECK(0 == tr->TraceSingleQubitOp(6, 1, q6)); + // these ops should fall through into the first layer (notice no special handling of duration zero) + + CHECK(1 == tr->TraceMultiQubitOp(7, 1, 1 /*nFirst*/, &q1 /*first*/, 1 /*nSecond*/, &q6 /*second*/)); + CHECK(1 == tr->TraceMultiQubitOp(8, 1, 1 /*nFirst*/, &q3 /*first*/, 1 /*nSecond*/, &q4 /*second*/)); + // because of q1 and q3, these ops should be added into the second layer, which now has all but q5 + + CHECK(0 == tr->TraceSingleQubitOp(9, 1, q5)); + // should fall through to the first layer + + Qubit qs46[2] = {q4, q6}; + CHECK(1 == tr->TraceMultiQubitOp(10, 1, 2 /*nFirst*/, qs46 /*first*/, 1 /*nSecond*/, &q5 /*second*/)); + // because of the controls, should be added into the second layer + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 2); + + CHECK(layers[0].operations.size() == 5); + const auto& ops0 = layers[0].operations; + CHECK(ops0.find(1) != ops0.end()); + CHECK(ops0.find(2) != ops0.end()); + CHECK(ops0.find(5) != ops0.end()); + CHECK(ops0.find(6) != ops0.end()); + CHECK(ops0.find(9) != ops0.end()); + + CHECK(layers[1].operations.size() == 5); + const auto& ops1 = layers[1].operations; + CHECK(ops1.find(3) != ops1.end()); + CHECK(ops1.find(4) != ops1.end()); + CHECK(ops1.find(7) != ops1.end()); + CHECK(ops1.find(8) != 
ops1.end()); + CHECK(ops1.find(10) != ops1.end()); +} + +// TODO: add multi-qubit ops +TEST_CASE("Operations with same id are counted together", "[tracer]") +{ + shared_ptr tr = CreateTracer(3 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + + // All of these ops should fit into a single layer L(0,3) + tr->TraceSingleQubitOp(1, 1, q1); + tr->TraceSingleQubitOp(2, 2, q1); + tr->TraceSingleQubitOp(1, 1, q2); + tr->TraceSingleQubitOp(2, 1, q2); + tr->TraceSingleQubitOp(1, 1, q2); + tr->TraceSingleQubitOp(3, 2, q3); + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 1); + CHECK(layers[0].operations.size() == 3); + const auto& ops = layers[0].operations; + CHECK(ops.find(1)->second == 3); + CHECK(ops.find(2)->second == 2); + CHECK(ops.find(3)->second == 1); +} + +TEST_CASE("Global barrier", "[tracer]") +{ + shared_ptr tr = CreateTracer(2 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + + CHECK(0 == tr->TraceSingleQubitOp(1, 4, q1)); // L(0,4) created + CHECK(0 == tr->TraceSingleQubitOp(2, 1, q4)); // added to L(0,4) + CHECK(1 == tr->InjectGlobalBarrier(42, 1)); // creates L(4,2) + + CHECK(2 == tr->TraceMultiQubitOp(3, 1, 1 /*nFirst*/, &q2 /*first*/, 1 /*nSecond*/, &q3 /*second*/)); + // the barrier shouldn't allow this op to fall through into L(0,4), so should create L(6,2) + + CHECK(INVALID == tr->TraceSingleQubitOp(4, 0, q1)); + // the barrier shouldn't allow this op to fall through into L(0,4), so should create pending op + + CHECK(2 == tr->TraceSingleQubitOp(5, 1, q1)); + // should be added into L(6,2) together with the pending op `3` + + CHECK(3 == tr->TraceSingleQubitOp(6, 3, q2)); + // long op, with no existing wide layers to host it, so should create L(8,3) + + CHECK(3 == tr->TraceSingleQubitOp(7, 3, q4)); + // long op but can be added into L(8,3), which 
is post the barrier + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 4); + CHECK(layers[0].operations.size() == 2); + CHECK(layers[1].operations.size() == 0); + CHECK(layers[2].operations.size() == 3); + CHECK(layers[3].operations.size() == 2); + + const auto& ops0 = layers[0].operations; + CHECK(ops0.find(1) != ops0.end()); + CHECK(ops0.find(2) != ops0.end()); + + CHECK(42 == layers[1].barrierId); + + const auto& ops2 = layers[2].operations; + CHECK(ops2.find(3) != ops2.end()); + CHECK(ops2.find(4) != ops2.end()); + CHECK(ops2.find(5) != ops2.end()); + + const auto& ops3 = layers[3].operations; + CHECK(ops3.find(6) != ops3.end()); + CHECK(ops3.find(7) != ops3.end()); +} + +// For layering purposes, measurements behave pretty much the same as other operations +TEST_CASE("Layering measurements", "[tracer]") +{ + shared_ptr tr = CreateTracer(1 /*layer duration*/); + + Qubit q1 = tr->AllocateQubit(); + Qubit q2 = tr->AllocateQubit(); + Qubit q3 = tr->AllocateQubit(); + Qubit q4 = tr->AllocateQubit(); + + CHECK(0 == tr->GetLayerIdOfSourceMeasurement(tr->TraceSingleQubitMeasurement(1, 1, q1))); + Qubit qs12[2] = {q1, q2}; + CHECK(1 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(2, 1, 2, qs12))); + CHECK(0 == tr->TraceSingleQubitOp(3, 1, q4)); + CHECK(0 == tr->GetLayerIdOfSourceMeasurement(tr->TraceSingleQubitMeasurement(4, 1, q3))); + Qubit qs23[2] = {q2, q3}; + CHECK(2 == tr->GetLayerIdOfSourceMeasurement(tr->TraceMultiQubitMeasurement(5, 1, 2, qs23))); + CHECK(1 == tr->TraceSingleQubitOp(3, 1, q4)); + + const vector& layers = tr->UseLayers(); + REQUIRE(layers.size() == 3); + CHECK(layers[0].operations.size() == 3); + CHECK(layers[1].operations.size() == 2); + CHECK(layers[2].operations.size() == 1); +} + +TEST_CASE("Output: to string", "[tracer]") +{ + std::unordered_map opNames = {{1, "X"}, {2, "Y"}, {3, "Z"}, {4, "b"}}; + shared_ptr tr = CreateTracer(1 /*layer duration*/, opNames); + + Qubit q1 = tr->AllocateQubit(); + 
tr->TraceSingleQubitOp(3, 1, q1); + tr->TraceSingleQubitOp(5, 1, q1); + tr->InjectGlobalBarrier(4, 2); + tr->TraceSingleQubitOp(3, 4, q1); + tr->TraceSingleQubitOp(2, 1, q1); + + { + std::stringstream out; + tr->PrintLayerMetrics(out, ",", true /*printZeroMetrics*/); + std::string metrics = out.str(); + + std::stringstream expected; + expected << "layer_id,name,Y,Z,5" << std::endl; + expected << "0,,0,1,0" << std::endl; + expected << "1,,0,0,1" << std::endl; + expected << "2,b,0,0,0" << std::endl; + expected << "4,,0,1,0" << std::endl; + expected << "8,,1,0,0" << std::endl; + + INFO(metrics); + CHECK(metrics == expected.str()); + } + + { + std::stringstream out; + tr->PrintLayerMetrics(out, ",", false /*printZeroMetrics*/); + std::string metrics = out.str(); + + std::stringstream expected; + expected << "layer_id,name,Y,Z,5" << std::endl; + expected << "0,,,1," << std::endl; + expected << "1,,,,1" << std::endl; + expected << "2,b,,," << std::endl; + expected << "4,,,1," << std::endl; + expected << "8,,1,," << std::endl; + + INFO(metrics); + CHECK(metrics == expected.str()); + } +} + +TEST_CASE("Output: to file", "[tracer]") +{ + std::unordered_map opNames = {{1, "X"}, {2, "Y"}, {3, "Z"}, {4, "b"}}; + shared_ptr tr = CreateTracer(1 /*layer duration*/, opNames); + + Qubit q1 = tr->AllocateQubit(); + tr->TraceSingleQubitOp(3, 1, q1); + tr->TraceSingleQubitOp(5, 1, q1); + tr->InjectGlobalBarrier(4, 2); + tr->TraceSingleQubitOp(3, 4, q1); + tr->TraceSingleQubitOp(2, 1, q1); + + const std::string fileName = "tracer-test.txt"; + std::ofstream out; + out.open(fileName); + tr->PrintLayerMetrics(out, "\t", false /*printZeroMetrics*/); + out.close(); + + std::ifstream in(fileName); + string line; + REQUIRE(in.is_open()); + std::string metrics(std::istreambuf_iterator{in}, {}); + in.close(); + + std::stringstream expected; + expected << "layer_id\tname\tY\tZ\t5" << std::endl; + expected << "0\t\t\t1\t" << std::endl; + expected << "1\t\t\t\t1" << std::endl; + expected << 
"2\tb\t\t\t" << std::endl; + expected << "4\t\t\t1\t" << std::endl; + expected << "8\t\t1\t\t" << std::endl; + + INFO(metrics); + CHECK(metrics == expected.str()); +}