From 139c9375bf36090099fcf10306731b4001c3b062 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Mon, 13 Jan 2020 14:18:07 -0800 Subject: [PATCH 1/4] Begin preparing for API review. --- MachineLearning/src/Classification.qs | 16 ++--- MachineLearning/src/Convert.qs | 66 -------------------- MachineLearning/src/Features.qs | 9 --- MachineLearning/src/GradientEstimation.qs | 57 +++++++++-------- MachineLearning/src/InputEncoding.qs | 26 ++++++-- MachineLearning/src/{Utils.qs => Private.qs} | 2 +- MachineLearning/src/RotationSequences.qs | 46 +++++++++----- MachineLearning/src/Structure.qs | 16 ++--- MachineLearning/src/Training.qs | 8 +-- MachineLearning/src/Types.qs | 2 +- MachineLearning/src/Validation.qs | 50 +-------------- 11 files changed, 105 insertions(+), 193 deletions(-) delete mode 100644 MachineLearning/src/Convert.qs delete mode 100644 MachineLearning/src/Features.qs rename MachineLearning/src/{Utils.qs => Private.qs} (87%) diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index 771270819ca..417c7cdaedf 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -11,27 +11,27 @@ namespace Microsoft.Quantum.MachineLearning { operation _PrepareClassification( encoder : (LittleEndian => Unit is Adj + Ctl), parameters : Double[], - gates : GateSequence, + structure : SequentialClassifierStructure, target : Qubit[] ) : Unit is Adj { encoder(LittleEndian(target)); - _ApplyGates(parameters, gates, target); + ApplySequentialClassifier(parameters, structure, target); } operation EstimateClassificationProbability( - tolerance: Double, + tolerance : Double, parameters : Double[], - gates: GateSequence, - sample: Double[], + structure : SequentialClassifierStructure, + sample : Double[], nMeasurements: Int ) : Double { let nQubits = FeatureRegisterSize(sample); - let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(structure!)), sample); let encodedSample = StateGenerator(nQubits, circEnc); return 1.0 - EstimateFrequencyA( - _PrepareClassification(encodedSample::Apply, parameters, gates, _), + _PrepareClassification(encodedSample::Apply, parameters, structure, _), _TailMeasurement(encodedSample::NQubits), encodedSample::NQubits, nMeasurements @@ -41,7 +41,7 @@ namespace Microsoft.Quantum.MachineLearning { operation EstimateClassificationProbabilities( tolerance : Double, parameters : Double[], - structure : GateSequence, + structure : SequentialClassifierStructure, samples : Double[][], nMeasurements : Int ) diff --git a/MachineLearning/src/Convert.qs b/MachineLearning/src/Convert.qs deleted file mode 100644 index 9149c146e3b..00000000000 --- a/MachineLearning/src/Convert.qs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Math; - - function unFlattenSchedule(sc : Int[][]) : SamplingSchedule - { - mutable ret = new Range[0]; - for (flattenedRange in sc) { - set ret += [flattenedRange[0]..flattenedRange[1]..flattenedRange[2]]; - } - return SamplingSchedule(ret); - } - - function unFlattenLabeledSamples(dat:Double[][], labs:Int[]) : LabeledSample[] { - mutable cnt = MinI(Length(dat), Length(labs)); - mutable ret = new LabeledSample[cnt]; - for (j in 0..(cnt - 1)) { - set ret w/= j <- LabeledSample(dat[j], labs[j]); - } - return ret; - } - - /// Debugging prop - operation unFlattenPauli(p:Int): Pauli - { - if (p==1) - { - return PauliX; - } - if (p==2) - { - return PauliY; - } - if (p==3) - { - return PauliZ; - } - return PauliI; - } - - /// Debugging prop - /// upcasting controlled rotation in flat representation (paramIx,pauliIx,gateSpan) - operation unFlattenControlledRotation(cod:Int[]): ControlledRotation { - return ControlledRotation( - GateSpan( - cod[2], cod[3...] - ), - unFlattenPauli(cod[1]), - cod[0] - ); - } - - /// Debugging prop - operation unFlattenGateSequence(seq: Int[][]) : GateSequence { - mutable tmp = new ControlledRotation[Length(seq)]; - for (icr in 0..(Length(seq) - 1)) { - set tmp w/= icr <- unFlattenControlledRotation(seq[icr]); - } - return GateSequence(tmp); - } - -} \ No newline at end of file diff --git a/MachineLearning/src/Features.qs b/MachineLearning/src/Features.qs deleted file mode 100644 index 94b882868d0..00000000000 --- a/MachineLearning/src/Features.qs +++ /dev/null @@ -1,9 +0,0 @@ -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Convert; - - function FeatureRegisterSize(sample : Double[]) : Int { - return Ceiling(Lg(IntAsDouble(Length(sample)))); - } - -} diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index 26fd7d039e4..d6b12e3b3a3 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -19,58 +19,61 @@ namespace Microsoft.Quantum.MachineLearning { operation _EstimateDerivativeWithParameterShift( inputEncoder : StateGenerator, - gates : GateSequence, + gates : SequentialClassifierStructure, parameters : (Double[], Double[]), nQubits : Int, nMeasurements : Int ) : Double { return EstimateRealOverlapBetweenStates( _ApplyLEOperationToRawRegister(inputEncoder::Apply, _), - _ApplyGates(Fst(parameters), gates, _), - _ApplyGates(Snd(parameters), gates, _), + ApplySequentialClassifier(Fst(parameters), gates, _), + ApplySequentialClassifier(Snd(parameters), gates, _), nQubits, nMeasurements ); } /// # Summary - /// polymorphic classical/quantum gradient estimator + /// Estimates the training gradient for a sequential classifier at a + /// particular set of parameters and for a given encoded input. /// /// # Input - /// ## param - /// circuit parameters - /// /// ## gates - /// sequence of gates in the circuits - /// + /// The structure of the sequential classifier as a sequence of quantum + /// operations. + /// ## param + /// A set of parameters for the given classifier structure. /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. 
- /// IMPORTANT: measCount==0 implies simulator deployment + /// An input to the sequential classifier, encoded into a state preparation + /// operation. + /// ## nMeasurements + /// The number of measurements to use in estimating the gradient. /// /// # Output - /// the gradient + /// An estimate of the training gradient at the given input and model + /// parameters. /// + /// # Remarks + /// This operation uses a Hadamard test and the parameter shift technique + /// together to estimate the gradient. operation EstimateGradient( - gates : GateSequence, + gates : SequentialClassifierStructure, param : Double[], sg : StateGenerator, nMeasurements : Int ) : (Double[]) { - //Synopsis: Suppose (param,gates) define Circ0 - //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 - //The expectation derivative is then 2 Re[] = - // Re[] - Re[] - //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 - //Thus we are left to compute Re[] = - // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> - //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) + // Synopsis: Suppose (param,gates) define Circ0 + // Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 + // The expectation derivative is then 2 Re[] = + // Re[] - Re[] + // We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 + // Thus we are left to compute Re[] = + // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> + // i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) - //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) - //and we want a unitary description of its \theta-derivative. It can be written as + // Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) + // and we want a unitary description of its \theta-derivative. It can be written as // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} mutable grad = ConstantArray(Length(param), 0.0); let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); @@ -91,7 +94,7 @@ namespace Microsoft.Quantum.MachineLearning { //controlled gate let controlledShift = paramShift w/ gate::Index <- (param[gate::Index] + 3.0 * PI()); - //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) + // Assumption: any rotation R has the property that R(\theta + 2 Pi) = (-1) R(\theta). // NB: This the *antiderivative* of the bracket let newDer1 = _EstimateDerivativeWithParameterShift( sg, gates, (param, controlledShift), nQubits, nMeasurements diff --git a/MachineLearning/src/InputEncoding.qs b/MachineLearning/src/InputEncoding.qs index 1eb4b1db7d3..0091b8b6876 100644 --- a/MachineLearning/src/InputEncoding.qs +++ b/MachineLearning/src/InputEncoding.qs @@ -60,11 +60,26 @@ namespace Microsoft.Quantum.MachineLearning { // Reflect about the negative coefficients to apply the negative signs // at the end. for (idxNegative in negLocs) { - ReflectAboutInteger(idxNegative, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) + ReflectAboutInteger(idxNegative, reg); } } - function ApproximateInputEncoder(tolerance : Double,coefficients : Double[]) + /// # Summary + /// Returns the number of qubits required to encode a particular feature + /// vector. 
+ /// + /// # Input + /// ## sample + /// A sample feature vector to be encoded into a qubit register. + /// + /// # Output + /// The size required to encode `sample` into a qubit register, expressed + /// as a number of qubits. + function FeatureRegisterSize(sample : Double[]) : Int { + return Ceiling(Lg(IntAsDouble(Length(sample)))); + } + + function ApproximateInputEncoder(tolerance : Double, coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 let nCoefficients = Length(coefficients); @@ -93,18 +108,17 @@ namespace Microsoft.Quantum.MachineLearning { // Here, by a "few," we mean fewer than the number of qubits required // to encode features. if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) { - return _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _); //TODO:MORE:ACCEPTANCE ("Wines" passing soi far) + return _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _); } // Finally, we fall back to arbitrary state preparation. return ApproximatelyPrepareArbitraryState(tolerance, complexCoefficients, _); } //EncodeNoisyInput - //TODO:REVIEW: Design consideration! The implicit qubit count must be read off from the state encoder, NOT from the gate sequence! - /// Create amplitude encoding of an array of real-valued coefficients /// The vector of 'coefficients' does not have to be unitary - function InputEncoder(coefficients : Double[]): (LittleEndian => Unit is Adj + Ctl) { + function InputEncoder(coefficients : Double[]) + : (LittleEndian => Unit is Adj + Ctl) { //default implementation, does not respect sparcity mutable complexCoefficients = new ComplexPolar[Length(coefficients)]; for ((idx, coefficient) in Enumerated(coefficients)) { diff --git a/MachineLearning/src/Utils.qs b/MachineLearning/src/Private.qs similarity index 87% rename from MachineLearning/src/Utils.qs rename to MachineLearning/src/Private.qs index 0b710c8bf29..83540965013 100644 --- a/MachineLearning/src/Utils.qs +++ b/MachineLearning/src/Private.qs @@ -5,7 +5,7 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Canon; open Microsoft.Quantum.Math; - function _AllNearlyEqualD(v1: Double[], v2: Double[]):Bool { + function _AllNearlyEqualD(v1 : Double[], v2 : Double[]) : Bool { return Length(v1) == Length(v2) and All(NearlyEqualD, Zip(v1, v2)); } diff --git a/MachineLearning/src/RotationSequences.qs b/MachineLearning/src/RotationSequences.qs index 31fa95373fd..b61ade05eb2 100644 --- a/MachineLearning/src/RotationSequences.qs +++ b/MachineLearning/src/RotationSequences.qs @@ -7,13 +7,21 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - /// What is the minimum number of qubits - /// to support the subject gate sequence? - /// Find the maximum qubit index m occuring - /// in a gate sequence and return m+1 - function NQubitsRequired(seq : GateSequence) : Int { + /// # Summary + /// Returns the number of qubits required to apply a given sequential + /// classifier. + /// + /// # Input + /// ## structure + /// The structure of a given sequential classifier. + /// + /// # Output + /// The minimum size of a register on which the sequential classifier + /// may be applied. + function NQubitsRequired(structure : SequentialClassifierStructure) + : Int { mutable nQubitsRequired = 0; - for (gate in seq!) { + for (gate in structure!) 
{ set nQubitsRequired = Fold( MaxI, 0, gate::Span::ControlIndices + [ @@ -25,12 +33,24 @@ namespace Microsoft.Quantum.MachineLearning { return nQubitsRequired; } - /// Apply parameterized gate sequence to subject qubit register + /// # Summary + /// Given the structure and parameterization of a sequential classifier, + /// applies the classifier to a register of qubits. /// - operation _ApplyGates(parameters : Double[], gates: GateSequence, qubits : Qubit[]) : (Unit) is Adj + Ctl { - //dumpRegisterToConsole(qubits); - for (gate in gates!) { - // let (gsp,p,ix) = gt!; + /// # Input + /// ## structure + /// Structure of the given sequential classifier. + /// ## parameters + /// A parameterization at which the given structure is applied. + /// ## qubits + /// A target register to which the classifier should be applied. + operation ApplySequentialClassifier( + structure : SequentialClassifierStructure, + parameters : Double[], + qubits : Qubit[] + ) + : (Unit) is Adj + Ctl { + for (gate in structure!) { if (gate::Index < Length(parameters)) { let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); if (IsEmpty(gate::Span::ControlIndices)) { @@ -44,8 +64,4 @@ namespace Microsoft.Quantum.MachineLearning { } } - operation ApplyGates(parameters : Double[], gates: GateSequence): (Qubit[] => Unit is Adj + Ctl) { - return _ApplyGates(parameters,gates,_); - } - } diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index c50e4acab6f..1e03521accf 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -25,9 +25,9 @@ namespace Microsoft.Quantum.MachineLearning { return _CallFlipped(fn, _, _); } - function LocalRotationsLayer(nQubits : Int, axis : Pauli) : GateSequence { + function LocalRotationsLayer(nQubits : Int, axis : Pauli) : SequentialClassifierStructure { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - return GateSequence(Mapped( + return SequentialClassifierStructure(Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( _UncontrolledSpanSequence(SequenceI(0, nQubits - 1)) @@ -36,9 +36,9 @@ namespace Microsoft.Quantum.MachineLearning { } - function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : GateSequence { + function PartialRotationsLayer(idxsQubits : Int[], axis : Pauli) : SequentialClassifierStructure { // [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - return GateSequence(Mapped( + return SequentialClassifierStructure(Mapped( _Flipped(ControlledRotation(_, axis, _)), Enumerated( _UncontrolledSpanSequence(idxsQubits) @@ -46,7 +46,7 @@ namespace Microsoft.Quantum.MachineLearning { )); } - function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : GateSequence { + function CyclicEntanglingLayer(nQubits : Int, axis : Pauli, stride : Int) : SequentialClassifierStructure { mutable rotations = new ControlledRotation[0]; for (idxTarget in 0..nQubits - 1) { set rotations += [ControlledRotation( @@ -57,10 +57,10 @@ namespace Microsoft.Quantum.MachineLearning { axis, idxTarget )]; } - return GateSequence(rotations); + return SequentialClassifierStructure(rotations); } - function CombinedGateSequence(layers : GateSequence[]) : GateSequence { + function CombinedGateSequence(layers : SequentialClassifierStructure[]) : SequentialClassifierStructure { mutable combined = (Head(layers))!; mutable offset = Length(combined); for (layer in Rest(layers)) { @@ -69,7 +69,7 @@ namespace Microsoft.Quantum.MachineLearning { } set offset += 
Length(layer!); } - return GateSequence(combined); + return SequentialClassifierStructure(combined); } } \ No newline at end of file diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index 24d43a2c138..c3220e87ce8 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -47,7 +47,7 @@ namespace Microsoft.Quantum.MachineLearning { } operation TrainSequentialClassifier( - gates: GateSequence, + gates: SequentialClassifierStructure, parameterSource: Double[][], samples: LabeledSample[], options : TrainingOptions, @@ -115,7 +115,7 @@ namespace Microsoft.Quantum.MachineLearning { operation _RunSingleTrainingStep( miniBatch : LabeledSample[], options : TrainingOptions, - param : Double[], gates : GateSequence + param : Double[], gates : SequentialClassifierStructure ) : (Double, Double[]) { mutable batchGradient = ConstantArray(Length(param), 0.0); @@ -185,7 +185,7 @@ namespace Microsoft.Quantum.MachineLearning { samples: LabeledSample[], schedule: SamplingSchedule, periodScore: Int, options : TrainingOptions, - model : SequentialModel, gates: GateSequence, + model : SequentialModel, gates: SequentialClassifierStructure, nPreviousBestMisses : Int ) : (Int, SequentialModel) { @@ -297,7 +297,7 @@ namespace Microsoft.Quantum.MachineLearning { /// ((no.hits,no.misses),(opt.bias,opt.parameters)) /// operation TrainSequentialClassifierAtModel( - gates : GateSequence, + gates : SequentialClassifierStructure, model : SequentialModel, samples : LabeledSample[], options : TrainingOptions, diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 3d8ca3ab9a5..19209644076 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -20,7 +20,7 @@ namespace Microsoft.Quantum.MachineLearning { ); /// Abstraction for sequence of gates - newtype GateSequence = ControlledRotation[]; + newtype SequentialClassifierStructure = ControlledRotation[]; /// Abstraction for state preparation /// Fst(StateGenerator) is the number of qubits diff --git a/MachineLearning/src/Validation.qs b/MachineLearning/src/Validation.qs index f146eb4427a..e73c2814879 100644 --- a/MachineLearning/src/Validation.qs +++ b/MachineLearning/src/Validation.qs @@ -16,54 +16,8 @@ namespace Microsoft.Quantum.MachineLearning { return Length(Misclassifications(proposed, actual)); } - /// # Summary - /// Using a flat description of a trained classification model, count - /// the number of mispredictions occuring over the validation set - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## trainingSet - /// the set of training samples - /// - /// ## trainingLabels - /// the set of training labels - /// - /// ## validatioSchedule - /// defines a subset of training data used for validation and computation of the *bias* - /// - /// ## gates - /// Flat representation of classifier structure. 
Each element is - /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] - /// - /// ## parameters - /// an array of candidate parameters - /// - /// ## bias - /// candidate predition bias - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// the number of misclassifications - /// - operation CountValidationMisses(tolerance: Double, nQubits: Int, trainingSet: Double[][], trainingLabels: Int[], validationSchedule: Int[][], gates: Int[][], parameters: Double[],bias:Double, nMeasurements: Int) : Int - { - let schValidate = unFlattenSchedule(validationSchedule); - let results = ValidateModel( - unFlattenGateSequence(gates), - SequentialModel(parameters, bias), - Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), - tolerance, nMeasurements, - schValidate - ); - return results::NMisclassifications; - } - - operation ValidateModel( - gates: GateSequence, + operation ValidateSequentialClassifier( + gates: SequentialClassifierStructure, model : SequentialModel, samples : LabeledSample[], tolerance: Double, From d72ea84a53ca210ba311815c5f853a81589f7d16 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Mon, 13 Jan 2020 14:32:09 -0800 Subject: [PATCH 2/4] A bit more refactoring. --- MachineLearning/src/Classification.qs | 6 +- MachineLearning/src/GradientEstimation.qs | 18 +++--- MachineLearning/src/RotationSequences.qs | 67 ----------------------- MachineLearning/src/Structure.qs | 63 ++++++++++++++++++++- 4 files changed, 73 insertions(+), 81 deletions(-) delete mode 100644 MachineLearning/src/RotationSequences.qs diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index 417c7cdaedf..868522ba2f6 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -10,13 +10,13 @@ namespace Microsoft.Quantum.MachineLearning { operation _PrepareClassification( encoder : (LittleEndian => Unit is Adj + Ctl), - parameters : Double[], structure : SequentialClassifierStructure, + parameters : Double[], target : Qubit[] ) : Unit is Adj { encoder(LittleEndian(target)); - ApplySequentialClassifier(parameters, structure, target); + ApplySequentialClassifier(structure, parameters, target); } operation EstimateClassificationProbability( @@ -31,7 +31,7 @@ namespace Microsoft.Quantum.MachineLearning { let circEnc = ApproximateInputEncoder(tolerance / IntAsDouble(Length(structure!)), sample); let encodedSample = StateGenerator(nQubits, circEnc); return 1.0 - EstimateFrequencyA( - _PrepareClassification(encodedSample::Apply, parameters, structure, _), + _PrepareClassification(encodedSample::Apply, structure, parameters, _), _TailMeasurement(encodedSample::NQubits), encodedSample::NQubits, nMeasurements diff --git a/MachineLearning/src/GradientEstimation.qs b/MachineLearning/src/GradientEstimation.qs index d6b12e3b3a3..9843328dc25 100644 --- a/MachineLearning/src/GradientEstimation.qs +++ b/MachineLearning/src/GradientEstimation.qs @@ -19,15 +19,15 @@ namespace Microsoft.Quantum.MachineLearning { operation _EstimateDerivativeWithParameterShift( inputEncoder : StateGenerator, - gates : SequentialClassifierStructure, + structure : SequentialClassifierStructure, parameters : (Double[], Double[]), nQubits : Int, nMeasurements : Int ) : Double { return EstimateRealOverlapBetweenStates( _ApplyLEOperationToRawRegister(inputEncoder::Apply, _), - ApplySequentialClassifier(Fst(parameters), gates, _), - ApplySequentialClassifier(Snd(parameters), 
gates, _), + ApplySequentialClassifier(structure, Fst(parameters), _), + ApplySequentialClassifier(structure, Snd(parameters), _), nQubits, nMeasurements ); } @@ -37,7 +37,7 @@ namespace Microsoft.Quantum.MachineLearning { /// particular set of parameters and for a given encoded input. /// /// # Input - /// ## gates + /// ## structure /// The structure of the sequential classifier as a sequence of quantum /// operations. /// ## param @@ -56,7 +56,7 @@ namespace Microsoft.Quantum.MachineLearning { /// This operation uses a Hadamard test and the parameter shift technique /// together to estimate the gradient. operation EstimateGradient( - gates : SequentialClassifierStructure, + structure : SequentialClassifierStructure, param : Double[], sg : StateGenerator, nMeasurements : Int @@ -76,16 +76,16 @@ namespace Microsoft.Quantum.MachineLearning { // and we want a unitary description of its \theta-derivative. It can be written as // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} mutable grad = ConstantArray(Length(param), 0.0); - let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); + let nQubits = MaxI(NQubitsRequired(structure), sg::NQubits); - for (gate in gates!) { + for (gate in structure!) { let paramShift = (param + [0.0]) // Shift the corresponding parameter. w/ gate::Index <- (param[gate::Index] + PI()); // NB: This the *antiderivative* of the bracket let newDer = _EstimateDerivativeWithParameterShift( - sg, gates, (param, paramShift), nQubits, nMeasurements + sg, structure, (param, paramShift), nQubits, nMeasurements ); if (IsEmpty(gate::Span::ControlIndices)) { //uncontrolled gate @@ -97,7 +97,7 @@ namespace Microsoft.Quantum.MachineLearning { // Assumption: any rotation R has the property that R(\theta + 2 Pi) = (-1) R(\theta). // NB: This the *antiderivative* of the bracket let newDer1 = _EstimateDerivativeWithParameterShift( - sg, gates, (param, controlledShift), nQubits, nMeasurements + sg, structure, (param, controlledShift), nQubits, nMeasurements ); set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1)); } diff --git a/MachineLearning/src/RotationSequences.qs b/MachineLearning/src/RotationSequences.qs deleted file mode 100644 index b61ade05eb2..00000000000 --- a/MachineLearning/src/RotationSequences.qs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - - /// # Summary - /// Returns the number of qubits required to apply a given sequential - /// classifier. - /// - /// # Input - /// ## structure - /// The structure of a given sequential classifier. - /// - /// # Output - /// The minimum size of a register on which the sequential classifier - /// may be applied. - function NQubitsRequired(structure : SequentialClassifierStructure) - : Int { - mutable nQubitsRequired = 0; - for (gate in structure!) { - set nQubitsRequired = Fold( - MaxI, 0, - gate::Span::ControlIndices + [ - gate::Span::TargetIndex, - nQubitsRequired - ] - ); - } - return nQubitsRequired; - } - - /// # Summary - /// Given the structure and parameterization of a sequential classifier, - /// applies the classifier to a register of qubits. - /// - /// # Input - /// ## structure - /// Structure of the given sequential classifier. 
- /// ## parameters - /// A parameterization at which the given structure is applied. - /// ## qubits - /// A target register to which the classifier should be applied. - operation ApplySequentialClassifier( - structure : SequentialClassifierStructure, - parameters : Double[], - qubits : Qubit[] - ) - : (Unit) is Adj + Ctl { - for (gate in structure!) { - if (gate::Index < Length(parameters)) { - let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); - if (IsEmpty(gate::Span::ControlIndices)) { - // Uncontrolled rotation of target - R(input); - } else { - //TODO: should one validate the control indices first? - (Controlled R)(Subarray(gate::Span::ControlIndices, qubits), input); - } - } - } - } - -} diff --git a/MachineLearning/src/Structure.qs b/MachineLearning/src/Structure.qs index 1e03521accf..41823f43782 100644 --- a/MachineLearning/src/Structure.qs +++ b/MachineLearning/src/Structure.qs @@ -2,8 +2,67 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { - + open Microsoft.Quantum.Math; open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + + /// # Summary + /// Returns the number of qubits required to apply a given sequential + /// classifier. + /// + /// # Input + /// ## structure + /// The structure of a given sequential classifier. + /// + /// # Output + /// The minimum size of a register on which the sequential classifier + /// may be applied. + function NQubitsRequired(structure : SequentialClassifierStructure) + : Int { + mutable nQubitsRequired = 0; + for (gate in structure!) { + set nQubitsRequired = Fold( + MaxI, 0, + gate::Span::ControlIndices + [ + gate::Span::TargetIndex, + nQubitsRequired + ] + ); + } + return nQubitsRequired; + } + + /// # Summary + /// Given the structure and parameterization of a sequential classifier, + /// applies the classifier to a register of qubits. + /// + /// # Input + /// ## structure + /// Structure of the given sequential classifier. + /// ## parameters + /// A parameterization at which the given structure is applied. + /// ## qubits + /// A target register to which the classifier should be applied. + operation ApplySequentialClassifier( + structure : SequentialClassifierStructure, + parameters : Double[], + qubits : Qubit[] + ) + : (Unit) is Adj + Ctl { + for (gate in structure!) { + if (gate::Index < Length(parameters)) { + let input = (gate::Axis, parameters[gate::Index], qubits[gate::Span::TargetIndex]); + if (IsEmpty(gate::Span::ControlIndices)) { + // Uncontrolled rotation of target + R(input); + } else { + //TODO: should one validate the control indices first? + (Controlled R)(Subarray(gate::Span::ControlIndices, qubits), input); + } + } + } + } function _UncontrolledSpanSequence(idxsQubits : Int[]) : GateSpan[] { return Mapped( @@ -60,7 +119,7 @@ namespace Microsoft.Quantum.MachineLearning { return SequentialClassifierStructure(rotations); } - function CombinedGateSequence(layers : SequentialClassifierStructure[]) : SequentialClassifierStructure { + function CombinedStructure(layers : SequentialClassifierStructure[]) : SequentialClassifierStructure { mutable combined = (Head(layers))!; mutable offset = Length(combined); for (layer in Rest(layers)) { From d460d9d5852382a610359accc4b456cdb01c6a77 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Mon, 13 Jan 2020 17:52:44 -0800 Subject: [PATCH 3/4] A bit more refactoring, more /// comments. 
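
To make the intent of the new comments concrete, the training entry point
documented in this change is meant to be driven roughly as follows. Only the
library type and operation names follow the code in this series; the wrapper
operation, dataset, classifier structure, start point, and sampling schedules
below are hypothetical.

```Q#
namespace Microsoft.Quantum.Samples {
    open Microsoft.Quantum.Arrays;
    open Microsoft.Quantum.Canon;
    open Microsoft.Quantum.MachineLearning;

    /// Trains a small two-qubit classifier on a dataset that is assumed to
    /// have been loaded by a classical driver.
    operation TrainExampleModel(features : Double[][], labels : Int[])
    : SequentialModel {
        // Two local rotation layers joined by an entangling layer; six
        // parameterized rotations in total.
        let structure = CombinedStructure([
            LocalRotationsLayer(2, PauliY),
            CyclicEntanglingLayer(2, PauliX, 1),
            LocalRotationsLayer(2, PauliY)
        ]);
        let samples = Mapped(LabeledSample, Zip(features, labels));
        return TrainSequentialClassifier(
            structure,
            // A single, arbitrarily chosen start point, with one parameter
            // per controlled rotation in the structure above.
            [ConstantArray(6, 0.01)],
            samples,
            DefaultTrainingOptions() w/ NMeasurements <- 10000,
            SamplingSchedule([0..99]),    // samples used for gradient steps
            SamplingSchedule([100..149])  // samples used to score each start point
        );
    }
}
```
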
--- MachineLearning/src/Training.qs | 172 ++++++++++++++++-------------- MachineLearning/src/Types.qs | 14 +++ MachineLearning/src/Validation.qs | 38 +++++++ 3 files changed, 143 insertions(+), 81 deletions(-) diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs index c3220e87ce8..ac0fa94eb52 100644 --- a/MachineLearning/src/Training.qs +++ b/MachineLearning/src/Training.qs @@ -46,13 +46,45 @@ namespace Microsoft.Quantum.MachineLearning { return optimum::Coordinate; } + /// # Summary + /// Given the structure of a sequential classifier, trains the classifier + /// on a given labeled training set. + /// + /// # Input + /// ## structure + /// Structure of the sequential classifier to be trained. + /// ## parameterSource + /// A list of parameter vectors to use as starting points during training. + /// ## samples + /// A set of labeled training data that will be used to perform training. + /// ## options + /// Configuration to be used when training; see + /// @"microsoft.quantum.machinelearning.trainingoptions" and + /// @"microsoft.quantum.machinelearning.defaulttrainingoptions" for more + /// details. + /// ## trainingSchedule + /// A sampling schedule to use when selecting samples from the training + /// data during training steps. + /// ## validationSchedule + /// A sampling schedule to use when selecting samples from the training + /// data when selecting which start point resulted in the best classifier + /// score. + /// + /// # Output + /// A parameterization of the given classifier and a bias between the two + /// classes, together corresponding to the best result from each of the + /// given start points. + /// + /// # See Also + /// - Microsoft.Quantum.MachineLearning.TrainSequentialClassifierAtModel + /// - Microsoft.Quantum.MachineLearning.ValidateSequentialClassifier operation TrainSequentialClassifier( - gates: SequentialClassifierStructure, - parameterSource: Double[][], - samples: LabeledSample[], + structure : SequentialClassifierStructure, + parameterSource : Double[][], + samples : LabeledSample[], options : TrainingOptions, - trainingSchedule: SamplingSchedule, - validationSchedule: SamplingSchedule + trainingSchedule : SamplingSchedule, + validationSchedule : SamplingSchedule ) : SequentialModel { mutable bestSoFar = SequentialModel([-1E12], -2.0); mutable bestValidation = Length(samples) + 1; @@ -60,16 +92,16 @@ namespace Microsoft.Quantum.MachineLearning { let features = Mapped(_Features, samples); let labels = Mapped(_Label, samples); - for (idxStart in 0..(Length(parameterSource) - 1)) { + for ((idxStart, parameters) in Enumerated(parameterSource)) { Message($"Beginning training at start point #{idxStart}..."); let proposedUpdate = TrainSequentialClassifierAtModel( - gates, SequentialModel(parameterSource[idxStart], 0.0), + structure, SequentialModel(parameters, 0.0), samples, options, trainingSchedule, 1 ); let probabilities = EstimateClassificationProbabilities( options::Tolerance, proposedUpdate::Parameters, - gates, + structure, Sampled(validationSchedule, features), options::NMeasurements ); @@ -148,49 +180,42 @@ namespace Microsoft.Quantum.MachineLearning { } /// # Summary - /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator + /// Perform one epoch of sequential classifier training on a subset of + /// data samples. /// /// # Input /// ## samples - /// a container of available data samples - /// + /// The samples to be trained on. 
/// ## sched - /// a schedule of the data subset for this training loop - /// + /// A sampling schedule defining a subset of samples to be included in training. /// ## schedScore - /// defines a (possibly different) data subset on which accuracy scoring is performed - /// + /// A sampling schedule defining a subset of samples to be used in + /// accuracy scoring. /// ## periodScore - /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) - /// - /// ## miniBatchSize - /// number of samples in a gradient mini batch - /// - /// ## param - /// initial parameter vector - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// reserved for future use; originally - initial prediction bias - /// - /// ## lrate - /// learning rate - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. + /// The number of gradient steps to be taken between scoring points. + /// For best accuracy, set to 1. + /// ## options + /// Options to be used in training. + /// ## structure + /// The structure of the sequential classifier to be trained. + /// ## model + /// The parameterization and bias of the sequential model to be trained. + /// ## nPreviousBestMisses + /// The best number of misclassifications observed in previous epochs. /// + /// # Output + /// - The smallest number of misclassifications observed through to this + /// epoch. + /// - The new best sequential model found. operation _RunSingleTrainingEpoch( - samples: LabeledSample[], - schedule: SamplingSchedule, periodScore: Int, + samples : LabeledSample[], + schedule : SamplingSchedule, periodScore: Int, options : TrainingOptions, - model : SequentialModel, gates: SequentialClassifierStructure, + structure : SequentialClassifierStructure, + model : SequentialModel, nPreviousBestMisses : Int ) : (Int, SequentialModel) { - let HARDCODEDunderage = 3; // 4/26 slack greater than 3 is not recommended - mutable nBestMisses = nPreviousBestMisses; mutable bestSoFar = model; let features = Mapped(_Features, samples); @@ -199,7 +224,7 @@ namespace Microsoft.Quantum.MachineLearning { let inferredLabels = InferredLabels( model::Bias, EstimateClassificationProbabilities( - options::Tolerance, model::Parameters, gates, + options::Tolerance, model::Parameters, structure, features, options::NMeasurements ) ); @@ -214,14 +239,14 @@ namespace Microsoft.Quantum.MachineLearning { ); for (minibatch in minibatches) { let (utility, updatedParameters) = _RunSingleTrainingStep( - minibatch, options, bestSoFar::Parameters, gates + minibatch, options, bestSoFar::Parameters, structure ); if (utility > 0.0000001) { // There has been some good parameter update. // Check if it actually improves things, and if so, // commit it. let probabilities = EstimateClassificationProbabilities( - options::Tolerance, updatedParameters, gates, + options::Tolerance, updatedParameters, structure, features, options::NMeasurements ); let updatedBias = _UpdatedBias( @@ -252,50 +277,36 @@ namespace Microsoft.Quantum.MachineLearning { ); } + + /// # Summary - /// Run a full circuit training loop on a subset of data samples + /// Given the structure of a sequential classifier, trains the classifier + /// on a given labeled training set, starting from a particular model. /// /// # Input + /// ## structure + /// Structure of the sequential classifier to be trained. + /// ## model + /// The sequential model to be used as a starting point for training. 
/// ## samples - /// a container of available data samples - /// - /// ## sched - /// a schedule of the data subset for this training loop - /// - /// ## schedScore - /// defines a (possibly different) data subset on which accuracy scoring is performed - /// - /// ## periodScore - /// number of blind gradient steps between scoring points (performance tool, set to 1 for best accuracy) - /// - /// ## miniBatchSize - /// number of samples in a gradient mini batch - /// - /// ## param - /// initial parameter vector - /// - /// ## gates - /// sequence of gates in the circuit - /// - /// ## bias - /// reserved for future use; originally - initial prediction bias - /// - /// ## lrate - /// learning rate - /// - /// ## maxEpochs - /// maximum number of epochs in this loop - /// - /// ## tol - /// tolerance: acceptable misprediction rate in training - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. - /// IMPORTANT: measCount==0 implies simulator deployment + /// A set of labeled training data that will be used to perform training. + /// ## options + /// Configuration to be used when training; see + /// @"microsoft.quantum.machinelearning.trainingoptions" and + /// @"microsoft.quantum.machinelearning.defaulttrainingoptions" for more + /// details. + /// ## schedule + /// A sampling schedule to use when selecting samples from the training + /// data during training steps. /// /// # Output - /// ((no.hits,no.misses),(opt.bias,opt.parameters)) + /// A parameterization of the given classifier and a bias between the two + /// classes, together corresponding to the best result from each of the + /// given start points. /// + /// # See Also + /// - Microsoft.Quantum.MachineLearning.TrainSequentialClassifier + /// - Microsoft.Quantum.MachineLearning.ValidateSequentialClassifier operation TrainSequentialClassifierAtModel( gates : SequentialClassifierStructure, model : SequentialModel, @@ -305,7 +316,6 @@ namespace Microsoft.Quantum.MachineLearning { periodScore : Int ) : SequentialModel { - //const let nSamples = Length(samples); let features = Mapped(_Features, samples); let actualLabels = Mapped(_Label, samples); @@ -339,7 +349,7 @@ namespace Microsoft.Quantum.MachineLearning { options w/ LearningRate <- lrate w/ MinibatchSize <- batchSize, - current, gates, + gates, current, nBestMisses ); if (nMisses < nBestMisses) { diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs index 19209644076..3174cad8db5 100644 --- a/MachineLearning/src/Types.qs +++ b/MachineLearning/src/Types.qs @@ -99,6 +99,20 @@ namespace Microsoft.Quantum.MachineLearning { StochasticRescaleFactor: Double ); + /// # Summary + /// Returns a default set of options for training classifiers. + /// + /// # Output + /// A reasonable set of default training options for use when training + /// classifiers. 
+ /// + /// # Example + /// To use the default options, but with additional measurements, use the + /// `w/` operator: + /// ```Q# + /// let options = DefaultTrainingOptions() + /// w/ NMeasurements <- 1000000; + /// ``` function DefaultTrainingOptions() : TrainingOptions { return TrainingOptions( 0.1, 0.005, 15, 10000, 16, 8, 0.01 diff --git a/MachineLearning/src/Validation.qs b/MachineLearning/src/Validation.qs index e73c2814879..99eefbc8273 100644 --- a/MachineLearning/src/Validation.qs +++ b/MachineLearning/src/Validation.qs @@ -4,6 +4,25 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Logical; open Microsoft.Quantum.Canon; + /// # Summary + /// Given a set of inferred labels and a set of correct labels, returns + /// indices for where each set of labels differs. + /// + /// # Input + /// ## inferredLabels + /// The labels inferred for a given training or validation set. + /// ## actualLabels + /// The true labels for a given training or validation set. + /// + /// # Output + /// An array of indices `idx` such that + /// `inferredLabels[idx] != actualLabels[idx]`. + /// + /// # Example + /// ```Q# + /// let misclassifications = Misclassifications([0, 1, 0, 0], [0, 1, 1, 0]); + /// Message($"{misclassifications}"); // Will print [2]. + /// ``` function Misclassifications(inferredLabels : Int[], actualLabels : Int[]) : Int[] { return Where( @@ -12,6 +31,25 @@ namespace Microsoft.Quantum.MachineLearning { ); } + /// # Summary + /// Given a set of inferred labels and a set of correct labels, returns + /// the number of indices at which each set of labels differ. + /// + /// # Input + /// ## inferredLabels + /// The labels inferred for a given training or validation set. + /// ## actualLabels + /// The true labels for a given training or validation set. + /// + /// # Output + /// The number of indices `idx` such that + /// `inferredLabels[idx] != actualLabels[idx]`. + /// + /// # Example + /// ```Q# + /// let nMisclassifications = NMisclassifications([1, 1, 0, 0], [0, 1, 1, 0]); + /// Message($"{nMisclassifications}"); // Will print 2. + /// ``` function NMisclassifications(proposed: Int[], actual: Int[]): Int { return Length(Misclassifications(proposed, actual)); } From 6b20b81d1342bf71968e903460d58214cca581e0 Mon Sep 17 00:00:00 2001 From: Christopher Granade Date: Thu, 16 Jan 2020 09:59:32 -0800 Subject: [PATCH 4/4] Progress on ///, code quality. --- MachineLearning/src/Classification.qs | 41 ++++++++++++ MachineLearning/src/InputEncoding.qs | 12 ++++ Standard/src/Preparation/Arbitrary.qs | 89 ++++++++++++++++++++++++--- 3 files changed, 133 insertions(+), 9 deletions(-) diff --git a/MachineLearning/src/Classification.qs b/MachineLearning/src/Classification.qs index 868522ba2f6..4f998eea008 100644 --- a/MachineLearning/src/Classification.qs +++ b/MachineLearning/src/Classification.qs @@ -19,6 +19,26 @@ namespace Microsoft.Quantum.MachineLearning { ApplySequentialClassifier(structure, parameters, target); } + /// # Summary + /// Given a sample and a sequential classifier, estimates the + /// classification probability for that sample by repeatedly measuring + /// the output of the classifier on the given sample. + /// + /// # Input + /// ## tolerance + /// The tolerance to allow in encoding the sample into a state preparation + /// operation. + /// ## parameters + /// A parameterization of the given sequential classifier. + /// ## structure + /// The structure of the given sequential classifier. 
+ /// ## sample + /// The feature vector for the sample to be classified. + /// ## nMeasurements + /// The number of measusrements to use in estimating the classification + /// probability. + /// # Output + /// An estimate of the classification probability for the given sample. operation EstimateClassificationProbability( tolerance : Double, parameters : Double[], @@ -38,6 +58,27 @@ namespace Microsoft.Quantum.MachineLearning { ); } + /// # Summary + /// Given a set of samples and a sequential classifier, estimates the + /// classification probability for those samples by repeatedly measuring + /// the output of the classifier on each sample. + /// + /// # Input + /// ## tolerance + /// The tolerance to allow in encoding the sample into a state preparation + /// operation. + /// ## parameters + /// A parameterization of the given sequential classifier. + /// ## structure + /// The structure of the given sequential classifier. + /// ## samples + /// An array of feature vectors for each sample to be classified. + /// ## nMeasurements + /// The number of measusrements to use in estimating the classification + /// probability. + /// # Output + /// An array of estimates of the classification probability for each given + /// sample. operation EstimateClassificationProbabilities( tolerance : Double, parameters : Double[], diff --git a/MachineLearning/src/InputEncoding.qs b/MachineLearning/src/InputEncoding.qs index 0091b8b6876..d997987ba19 100644 --- a/MachineLearning/src/InputEncoding.qs +++ b/MachineLearning/src/InputEncoding.qs @@ -79,6 +79,18 @@ namespace Microsoft.Quantum.MachineLearning { return Ceiling(Lg(IntAsDouble(Length(sample)))); } + /// # Summary + /// Given a set of coefficients and a tolerance, returns a state preparation + /// operation that prepares each coefficient as the corresponding amplitude + /// of a computational basis state, up to the given tolerance. + /// + /// # Input + /// ## tolerance + /// // TODO + /// ## coefficients + /// // TODO + /// # Output + /// // TODO function ApproximateInputEncoder(tolerance : Double, coefficients : Double[]) : (LittleEndian => Unit is Adj + Ctl) { //First quantize the coefficients: for a coef x find such y*tolerance, where y is integer and |x-y*tolerance| \neq tolerance/2 diff --git a/Standard/src/Preparation/Arbitrary.qs b/Standard/src/Preparation/Arbitrary.qs index 8b12642d855..7115ca75c48 100644 --- a/Standard/src/Preparation/Arbitrary.qs +++ b/Standard/src/Preparation/Arbitrary.qs @@ -111,15 +111,25 @@ namespace Microsoft.Quantum.Preparation { /// # Summary - /// Returns an operation that prepares a given quantum state. + /// Given a set of coefficients and a little-endian encoded quantum register, + /// prepares an state on that register described by the given coefficients. /// - /// The returned operation $U$ prepares an arbitrary quantum + /// # Description + /// This operation prepares an arbitrary quantum /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from - /// the $n$-qubit computational basis state $\ket{0...0}$. + /// the $n$-qubit computational basis state $\ket{0 \cdots 0}$. + /// In particular, the action of this operation can be simulated by the + /// a unitary transformation $U$ which acts on the all-zeros state as /// /// $$ /// \begin{align} - /// U\ket{0...0}=\ket{\psi}=\frac{\sum_{j=0}^{2^n-1}r_j e^{i t_j}\ket{j}}{\sqrt{\sum_{j=0}^{2^n-1}|r_j|^2}}. 
+ /// U\ket{0...0} + /// & = \ket{\psi} \\\\ + /// & = \frac{ + /// \sum_{j=0}^{2^n-1} r_j e^{i t_j} \ket{j} + /// }{ + /// \sqrt{\sum_{j=0}^{2^n-1} |r_j|^2} + /// }. /// \end{align} /// $$ /// @@ -144,12 +154,70 @@ namespace Microsoft.Quantum.Preparation { /// - Synthesis of Quantum Logic Circuits /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov /// https://arxiv.org/abs/quant-ph/0406176 + /// + /// # See Also + /// - Microsoft.Quantum.Preparation.ApproximatelyPrepareArbitraryState operation PrepareArbitraryState(coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { ApproximatelyPrepareArbitraryState(0.0, coefficients, qubits); } - /// TODO - operation ApproximatelyPrepareArbitraryState(tolerance : Double, coefficients : ComplexPolar[], qubits : LittleEndian) : Unit is Adj + Ctl { + /// # Summary + /// Given a set of coefficients and a little-endian encoded quantum register, + /// prepares an state on that register described by the given coefficients, + /// up to a given approximation tolerance. + /// + /// # Description + /// This operation prepares an arbitrary quantum + /// state $\ket{\psi}$ with complex coefficients $r_j e^{i t_j}$ from + /// the $n$-qubit computational basis state $\ket{0 \cdots 0}$. + /// In particular, the action of this operation can be simulated by the + /// a unitary transformation $U$ which acts on the all-zeros state as + /// + /// $$ + /// \begin{align} + /// U\ket{0...0} + /// & = \ket{\psi} \\\\ + /// & = \frac{ + /// \sum_{j=0}^{2^n-1} r_j e^{i t_j} \ket{j} + /// }{ + /// \sqrt{\sum_{j=0}^{2^n-1} |r_j|^2} + /// }. + /// \end{align} + /// $$ + /// + /// # Input + /// ## tolerance + /// The approximation tolerance to be used when preparing the given state. + /// + /// ## coefficients + /// Array of up to $2^n$ complex coefficients represented by their + /// absolute value and phase $(r_j, t_j)$. The $j$th coefficient + /// indexes the number state $\ket{j}$ encoded in little-endian format. + /// + /// ## qubits + /// Qubit register encoding number states in little-endian format. This is + /// expected to be initialized in the computational basis state + /// $\ket{0...0}$. + /// + /// # Remarks + /// Negative input coefficients $r_j < 0$ will be treated as though + /// positive with value $|r_j|$. `coefficients` will be padded with + /// elements $(r_j, t_j) = (0.0, 0.0)$ if fewer than $2^n$ are + /// specified. + /// + /// # References + /// - Synthesis of Quantum Logic Circuits + /// Vivek V. Shende, Stephen S. Bullock, Igor L. Markov + /// https://arxiv.org/abs/quant-ph/0406176 + /// + /// # See Also + /// - Microsoft.Quantum.Preparation.ApproximatelyPrepareArbitraryState + operation ApproximatelyPrepareArbitraryState( + tolerance : Double, + coefficients : ComplexPolar[], + qubits : LittleEndian + ) + : Unit is Adj + Ctl { // pad coefficients at tail length to a power of 2. 
let coefficientsPadded = Padded(-2 ^ Length(qubits!), ComplexPolar(0.0, 0.0), coefficients); let target = (qubits!)[0]; @@ -168,7 +236,10 @@ namespace Microsoft.Quantum.Preparation { /// # See Also /// - PrepareArbitraryState /// - Microsoft.Quantum.Canon.MultiplexPauli - operation _ApproximatelyPrepareArbitraryState(tolerance: Double, coefficients : ComplexPolar[], control : LittleEndian, target : Qubit) + operation _ApproximatelyPrepareArbitraryState( + tolerance : Double, coefficients : ComplexPolar[], + control : LittleEndian, target : Qubit + ) : Unit is Adj + Ctl { // For each 2D block, compute disentangling single-qubit rotation parameters let (disentanglingY, disentanglingZ, newCoefficients) = _StatePreparationSBMComputeCoefficients(coefficients); @@ -188,9 +259,9 @@ namespace Microsoft.Quantum.Preparation { } } else { if (_AnyOutsideToleranceCP(tolerance, newCoefficients)) { - let newControl = LittleEndian((control!)[1 .. Length(control!) - 1]); + let newControl = LittleEndian(Rest(control!)); let newTarget = (control!)[0]; - _ApproximatelyPrepareArbitraryState(tolerance,newCoefficients, newControl, newTarget); + _ApproximatelyPrepareArbitraryState(tolerance, newCoefficients, newControl, newTarget); } } }
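
Putting the pieces of this series together, classifying new samples with a
trained model is expected to look roughly like the following sketch. The
`EstimateClassificationProbabilities` signature and the
`InferredLabels(model::Bias, ...)` pattern follow the diffs above (the latter
as used in Training.qs); the wrapper operation, tolerance, and measurement
budget are placeholders.

```Q#
namespace Microsoft.Quantum.Samples {
    open Microsoft.Quantum.MachineLearning;

    /// Assigns a label to each of the given feature vectors, using a
    /// previously trained sequential classifier.
    operation ClassifySamples(
        structure : SequentialClassifierStructure,
        model : SequentialModel,
        samples : Double[][]
    ) : Int[] {
        // Estimate the class-1 probability of each sample, then threshold
        // against the trained bias to obtain hard labels, mirroring the
        // scoring step used during training.
        let probabilities = EstimateClassificationProbabilities(
            0.005,              // state preparation tolerance (arbitrary here)
            model::Parameters,
            structure,
            samples,
            10000               // measurements per sample (arbitrary here)
        );
        return InferredLabels(model::Bias, probabilities);
    }
}
```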