diff --git a/MachineLearning/src/DataModel/Interop.cs b/MachineLearning/src/DataModel/Interop.cs index 262b17365b2..a59d812d233 100644 --- a/MachineLearning/src/DataModel/Interop.cs +++ b/MachineLearning/src/DataModel/Interop.cs @@ -176,8 +176,8 @@ public static List PartialLocalLayer(long[] indices, char pauli) /// /// Creates a cyclic block of nQubits controlled rotations that starts - /// with contol qubit (nQubits-1), target qubit (cspan-1) % n , followed by the - /// ladder of entanglers with control qubit iq and target qubit (iq+cspan) % n + /// with control qubit (nQubits-1), target qubit (cspan-1) % nQubits , followed by a + /// ladder of entanglers with control qubits iq and target qubit (iq+cspan) % nQubits /// /// Number of qubits to entangle /// @@ -220,68 +220,6 @@ public static void reindex(List struc) } } - public void QcccTrainSequential(IQArray> parameterSource, IQArray> trainingSet, IQArray trainingLabels, IQArray> trainingSchedule, - IQArray> validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - var sim = new QuantumSimulator(false, randomizationSeed); - (this._cachedParameters, this._bias) = - TrainQcccSequential.Run(sim, this._nQubits, this._structure, parameterSource, trainingSet, trainingLabels, trainingSchedule, validationSchedule, learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements).Result; - } - public void QcccTrainSequential(List parameterSource, List trainingSet, List trainingLabels, List trainingSchedule, - List validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - QcccTrainSequential(Qonvert.ToQ(parameterSource), Qonvert.ToQ(trainingSet), Qonvert.ToQ(trainingLabels), Qonvert.ToQ(trainingSchedule), - Qonvert.ToQ(validationSchedule), learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements, randomizationSeed); - } - - public void 
QcccTrainParallel(IQArray> parameterSource, IQArray> trainingSet, IQArray trainingLabels, IQArray> trainingSchedule, - IQArray> validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - var simAll = new List(parameterSource.Count); - var resultsAll = new List<(IQArray, double)>(parameterSource.Count); - var parameterComb = new List>>(parameterSource.Count); - - var indices = new int[parameterSource.Count]; - for (int j = 0; j < parameterSource.Count; j++) - { - indices[j] = j; - simAll.Add(new QuantumSimulator(false, randomizationSeed)); - resultsAll.Add((new QArray(),0.0)); - parameterComb.Add(new QArray>(new IQArray[] { parameterSource[j] })); //Isolating parameter starts - one per thread - } - Parallel.ForEach(indices, - (j) => - { - - var rslt = - TrainQcccSequential.Run(simAll[j], this._nQubits, this._structure, parameterComb[j], trainingSet, trainingLabels, trainingSchedule, validationSchedule, learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements).Result; - resultsAll[j] = rslt; - } - ); - //Estimated parameters and biases for each proposed parameter start. 
Now postprocess - long bestValidation = long.MaxValue; - int bestJ = -1; - var sim = new QuantumSimulator(false, randomizationSeed); - for (int j = 0; j < parameterSource.Count; j++) - { - var (pars, bias) = resultsAll[j]; - long misses = CountValidationMisses.Run(sim, tolerance, this._nQubits, trainingSet, trainingLabels, validationSchedule, this._structure, pars, bias, nMeasurements).Result; - if (bestValidation > misses) - { - bestValidation = misses; - bestJ = j; - } - } - (this._cachedParameters, this._bias) = resultsAll[bestJ]; - } //QcccTrainParallel - - public void QcccTrainParallel(List parameterSource, List trainingSet, List trainingLabels, List trainingSchedule, - List validationSchedule, double learningRate, double tolerance, long miniBatchSize, long maxEpochs, long nMeasurements, uint randomizationSeed) - { - QcccTrainParallel(Qonvert.ToQ(parameterSource), Qonvert.ToQ(trainingSet), Qonvert.ToQ(trainingLabels), Qonvert.ToQ(trainingSchedule), - Qonvert.ToQ(validationSchedule), learningRate, tolerance, miniBatchSize, maxEpochs, nMeasurements, randomizationSeed); - } - public long CountMisclassifications(double tolerance, IQArray> samples, IQArray knownLabels, IQArray> validationSchedule, long nMeasurements, uint randomizationSeed) { if (this.isTrained) @@ -304,31 +242,6 @@ public long CountMisclassifications(double tolerance, List samples, Li return CountMisclassifications(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(knownLabels), Qonvert.ToQ(validationSchedule), nMeasurements, randomizationSeed); } - //EstimateClassificationProbabilitiesClassicalDataAdapter(samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] - public double[] EstimateClassificationProbabilities(double tolerance, IQArray> samples, IQArray> schedule, long nMeasurements, uint randomizationSeed) - { - if (this.isTrained) - { - var sim = new QuantumSimulator(false, randomizationSeed); - IQArray probs = 
EstimateClassificationProbabilitiesClassicalDataAdapter.Run(sim, tolerance, samples, schedule, this._nQubits, this._structure, this.CachedParameters, nMeasurements).Result; - return probs.ToArray(); - } - return new double[] { -1.0 }; - } - - public double[] EstimateClassificationProbabilities(double tolerance, List samples, List schedule, long nMeasurements, uint randomizationSeed) - { - return EstimateClassificationProbabilities(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(schedule), nMeasurements, randomizationSeed); - } - - public double[] EstimateClassificationProbabilities(double tolerance, List samples, long nMeasurements, uint randomizationSeed) - { - List sched = new List(1); - sched.Add(new long[] { 0L, 1L, (long)(samples.Count - 1) }); - return EstimateClassificationProbabilities(tolerance, Qonvert.ToQ(samples), Qonvert.ToQ(sched), nMeasurements, randomizationSeed); - } - - } //class ClassificationModel } diff --git a/MachineLearning/src/Runtime/Circuits.qs b/MachineLearning/src/Runtime/Circuits.qs deleted file mode 100644 index 49df95cd18e..00000000000 --- a/MachineLearning/src/Runtime/Circuits.qs +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Math; - open Microsoft.Quantum.Arrays; - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Convert; - open Microsoft.Quantum.Diagnostics; - open Microsoft.Quantum.Preparation; - open Microsoft.Quantum.Characterization; - - /// WARNING: the downstream EstimateFrequencyA counts the frequency of Zero - - operation measureLastQubit(nQubits : Int): (Qubit[] => Result) { - let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; - return Measure(paulis, _); - } - - operation _endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence, reg: Qubit[]): Unit is Adj - { - enc(LittleEndian(reg)); - _ApplyGates(parameters, gates, reg); - } - - operation endToEndPreparation(enc: (LittleEndian => Unit is Adj + Ctl), parameters: Double[], gates: GateSequence) : (Qubit[] => Unit is Adj) - { - return _endToEndPreparation(enc,parameters, gates, _); - } - - function collectNegativeLocs(cNegative: Int, coefficients : ComplexPolar[]) : Int[] - { - mutable negLocs = ConstantArray(cNegative, -1); - mutable nlx = 0; - for (idx in 0 .. Length(coefficients) - 1) - { - let (r,a) = (coefficients[idx])!; - if (AbsD(a - PI()) < 1E-9) { - if (nlx < cNegative) - { - set negLocs w/= nlx <- idx; - set nlx = nlx+1; - } - } - } - return negLocs; - } //collectNegativeLocs - - // NOTE: the last qubit of 'reg' in this context is the auxillary qubit used in the Hadamard test. 
- operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { - let L = Length(reg) - 1; - let g1 = _ApplyGates(param1,gates1,_); - let g2 = _ApplyGates(param2,gates2,_); - - enc(LittleEndian(reg[0..(L-1)])); - within { - H(Tail(reg)); - } apply { - (Controlled g1) ([reg[L]], reg[0..(L-1)]); - within { - X(Tail(reg)); - } apply { - (Controlled g2) ([reg[L]], reg[0..(L-1)]); - (Controlled Z) ([reg[L]], reg[(L-1)]); - } - } - } - - operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { - return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); - } - - operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double - { - return 1.0-EstimateFrequencyA(endToEndHTcircuit(enc2,param1,gates1,param2,gates2),measureLastQubit(nQubits), nQubits, nMeasurements); - } - - - - /// # Summary - /// polymorphic classical/quantum gradient estimator - /// - /// # Input - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## sg - /// generates quantum encoding of a subject sample (either simulated or true) - /// - /// ## measCount - /// number of true quantum measurements to estimate probabilities. 
- /// IMPORTANT: measCount==0 implies simulator deployment - /// - /// # Output - /// the gradient - /// - operation EstimateGradient(param : Double[], gates: GateSequence, sg: StateGenerator, nMeasurements : Int) : (Double[]) { - //Synopsis: Suppose (param,gates) define Circ0 - //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 - //The expectation derivative is then 2 Re[] = - // Re[] - Re[] - //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 - //Thus we are left to compute Re[] = - // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> - //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) - - - //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) - //and we want a unitary description of its \theta-derivative. It can be written as - // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} - let pC = Length(param); - mutable grad = ConstantArray(pC, 0.0); - mutable paramShift = param + [0.0]; - let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); - - for (gate in gates!) 
{ - set paramShift w/= gate::Index <- (param[gate::Index] + PI()); //Shift the corresponding parameter - // NB: This the *antiderivative* of the bracket - let newDer = 2.0 * HardamardTestPhysical( - sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements - ) - 1.0; - if (IsEmpty(gate::Span::ControlIndices)) { - //uncontrolled gate - set grad w/= gate::Index <- grad[gate::Index] + newDer; - } else { - //controlled gate - set paramShift w/=gate::Index<-(param[gate::Index]+3.0 * PI()); - //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) - // NB: This the *antiderivative* of the bracket - let newDer1 = 2.0 * HardamardTestPhysical( - sg::Apply, param, gates, paramShift, gates, nQubits + 1, - nMeasurements - ) - 1.0; - set grad w/= gate::Index <- (grad[gate::Index] + 0.5* (newDer - newDer1)); - set paramShift w/= gate::Index <-( param[gate::Index] + PI()); //unshift by 2 Pi (for debugging purposes) - } - set paramShift w/= gate::Index <- param[gate::Index]; //unshift this parameter - } - return grad; - - } //GradientHack - - - /// # Summary - /// computes stochastic gradient on one classical sample - /// - /// # Input - /// ## param - /// circuit parameters - /// - /// ## gates - /// sequence of gates in the circuits - /// - /// ## sample - /// sample vector as a raw array - /// - /// ## nMeasurements - /// number of true quantum measurements to estimate probabilities - /// - /// # Output - /// the gradient - /// - operation EstimateGradientFromClassicalSample(tolerance: Double, param : Double[], gates: GateSequence, sample: Double[], nMeasurements : Int) : (Double[]) { - let nQubits = MaxI(FeatureRegisterSize(sample), NQubitsRequired(gates)); - let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); - let sg = StateGenerator(nQubits, circEnc); - return EstimateGradient(param, gates, sg, nMeasurements); - } - - //Csharp-frendly adapter for gradient estimation - //'gates' is a array of "flattened" 
controlled rotation defitions - //each such definition is Int[no.controls+3] in the format [parameter index, Pauli index, target index <,control qubit indices>] - //Pauli index is: 0 for I, 1 for X, 2 for y, 3 for Z - //target index is the index of the target qubit of the rotation - //Sequence of can be empty for uncontroled - operation GradientClassicalSimulationAdapter(tolerance: Double, param : Double[], gates: Int[][], sample: Double[]) : (Double[]) - { - - return EstimateGradientFromClassicalSample(tolerance, param,unFlattenGateSequence(gates),sample,0); - - } - - /// # Summary - /// Get a list of all the classification probabilities. In the from of (prob1,label) pairs. THIS operation is IN DEPRECATION - /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## param - /// parameters of the circuits - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## nMeasurements - /// the maximum number of quantum measurements used in the probability estimation - /// - /// # Output - /// TODO - operation ClassificationProbabilitiesClassicalData(samples: LabeledSample[], sched: SamplingSchedule, param: Double[], gates: GateSequence, nMeasurements: Int): - (Double,Int)[] { - mutable N = IsEmpty(samples) - ? NQubitsRequired(gates) - | MaxI(NQubitsRequired(gates), FeatureRegisterSize(_Features(Head(samples)))); - mutable ret = new (Double, Int)[0]; - for (rg in sched!) { - for (ix in rg) { - let sample = samples[ix]; - //agnostic w.r.t. 
simulator (may still be simulable) - let prob1 = EstimateClassificationProbabilityFromSample(1E-12, param, gates, sample::Features, nMeasurements); - set ret += [(prob1, sample::Label)]; - } - } - - return ret; - } - - operation EstimateClassificationProbabilitiesClassicalDataAdapter(tolerance: Double, samples: Double[][], schedule: Int[][], nQubits: Int, gates: Int[][], param: Double[], measCount: Int): Double[] - { - return EstimateClassificationProbabilitiesClassicalData(tolerance, samples, unFlattenSchedule(schedule), nQubits, unFlattenGateSequence(gates), param, measCount); - } - - - /// # Summary - /// generate a flat list of sample indices where mispredictions occur - /// - /// # Input - /// ## sched - /// a sampling schedule - /// - /// ## pls - /// a list of estimated probabilities with the corresponding class labels - /// - /// ## bias - /// bias on record - /// - /// # Output - /// the list of indices where mispredictions occur - /// - function MissLocations(sched : SamplingSchedule, pls : (Double, Int)[], bias: Double) : Int[] { - mutable ret = new Int[0]; - mutable ir = 0; - - for (rg in sched!) 
{ - for (ix in rg) { - let (prob1, lab) = pls[ir]; - set ir += 1; - if (prob1 + bias > 0.5) { - if (lab < 1) { - set ret += [ix]; - } - } else { - if (lab > 0) { - set ret += [ix]; - } - } - } - } - return ret; - } - - /// # Summary - /// C#-friendly adapter to misclassification tally - /// - /// # Input - /// ## vectors - /// data vectors in flat encoding - /// - /// ## labels - /// array of corresponding class lables - /// - /// ## schedule - /// flat representation of index subset on which the circuit is scored - /// - /// ## param - /// circuit parameters - /// - /// ## gateStructure - /// gate structure in flat representation - /// - /// ## bias - /// prediction bias to be tested - /// - /// ## measCount - /// maximum number of quantum measurements per estimation (measCount==0 implies simulator deployment) - /// - /// # Output - /// the number of misclassifications - /// - operation MisclassificationScoreAdapter(vectors: Double[][], labels: Int[], schedule: Int[][], param: Double[], gateStructure: Int[][], bias: Double, measCount: Int) : Int { - mutable misses = 0; - let samples = unFlattenLabeledSamples(vectors,labels); - let gates = unFlattenGateSequence(gateStructure); - let sched = unFlattenSchedule(schedule); - - let pls = ClassificationProbabilitiesClassicalData(samples,sched,param,gates,measCount); - let biasCurrent = _UpdatedBias(pls, bias, 0.01); - let (h1,m1) = TallyHitsMisses(pls,biasCurrent); - return m1; - } - - /// # Summary - /// Extract a mini batch of samples and wrap the batch as a LabeledSampleContainer - /// - /// # Input - /// ## size - /// desired number of samples in the mini batch - /// - /// ## ixLoc - /// starting index for the batch in the list of locations - /// - /// ## locations - /// list of indices of samples of interest - /// - /// ## samples - /// the container to extract the samples from - /// - /// # Output - /// the mini batched wrapped as a LabeledSampleContainer - /// - /// # Remarks - /// the resulting mini batch can be 
occasionally shorter than the requested 'size' - /// (when it falls on the tail end of the list of 'locations') - /// - function ExtractMiniBatch(size: Int, ixLoc: Int, locations: Int[], samples: LabeledSample[]): LabeledSample[] { - mutable cnt = Length(locations)-ixLoc; - if (cnt > size) - { - set cnt = size; - } - mutable rgSamples = new LabeledSample[0]; - if (cnt > 0) - { - set rgSamples = new LabeledSample[cnt]; - for (isa in 0..(cnt-1)) - { - set rgSamples w/=isa<- samples[locations[ixLoc+isa]]; - } - } - return rgSamples; - } - - /// # Summary - /// (Randomly) inflate of deflate the source number - operation randomize(src : Double, relativeFuzz : Double) : Double { - return src * ( - 1.0 + relativeFuzz * (Random([0.5, 0.5]) > 0 ? 1.0 | -1.0) - ); - } - - - - /// Summary - /// One possible C#-friendly wrap around the StochasticTrainingLoop - /// - operation StochasticTrainingLoopPlainAdapter(vectors: Double[][], labels: Int[], sched: Int[][], schedScore: Int[][], periodScore: Int, - miniBatchSize: Int, param: Double[],gates: Int[][], bias: Double, lrate: Double, maxEpochs: Int, tol: Double, measCount: Int ) : Double[] // - { - let samples = unFlattenLabeledSamples(vectors,labels); - let sch = unFlattenSchedule(sched); - let schScore = unFlattenSchedule(sched); - let gts = unFlattenGateSequence(gates); - let ((h,m),(b,parpar)) = StochasticTrainingLoop(samples, sch, schScore, periodScore, - miniBatchSize, param, gts, bias, lrate, maxEpochs, tol, measCount); - mutable ret = new Double[Length(parpar)+3]; - set ret w/=0<-IntAsDouble (h); - set ret w/=1<-IntAsDouble (m); - set ret w/=2<-b; - for (j in 0..(Length(parpar)-1)) - { - set ret w/=(j+3)<-parpar[j]; - } - return ret; - } - - -} diff --git a/MachineLearning/src/Runtime/Classification.qs b/MachineLearning/src/Runtime/Classification.qs index 65c3e67293e..aa3d9fae7e2 100644 --- a/MachineLearning/src/Runtime/Classification.qs +++ b/MachineLearning/src/Runtime/Classification.qs @@ -5,146 +5,90 @@ namespace 
Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Convert; - operation EstimateClassificationProbabilityFromEncodedSample( - encodedSample : StateGenerator, - parameters: Double[], - gates: GateSequence, nMeasurements : Int - ) - : Double { - return 1.0 - EstimateFrequencyA( - endToEndPreparation(encodedSample::Apply, parameters,gates), - measureLastQubit(encodedSample::NQubits), - encodedSample::NQubits, - nMeasurements - ); - } + + operation _PrepareClassification( + encoder : (LittleEndian => Unit is Adj + Ctl), + parameters : Double[], + gates : GateSequence, + target : Qubit[] + ) + : Unit is Adj { + encoder(LittleEndian(target)); + _ApplyGates(parameters, gates, target); + } - operation EstimateClassificationProbabilityFromSample(tolerance: Double, parameters : Double[], gates: GateSequence, sample: Double[], nMeasurements: Int) - : Double { - let nQubits = FeatureRegisterSize(sample); - let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); - return EstimateClassificationProbabilityFromEncodedSample( - StateGenerator(nQubits, circEnc), parameters, gates, nMeasurements - ); + operation EstimateClassificationProbability( + tolerance: Double, + parameters : Double[], + gates: GateSequence, + sample: Double[], + nMeasurements: Int + ) + : Double { + let nQubits = FeatureRegisterSize(sample); + let circEnc = NoisyInputEncoder(tolerance / IntAsDouble(Length(gates!)), sample); + let encodedSample = StateGenerator(nQubits, circEnc); + return 1.0 - EstimateFrequencyA( + _PrepareClassification(encodedSample::Apply, parameters, gates, _), + _TailMeasurement(encodedSample::NQubits), + encodedSample::NQubits, + nMeasurements + ); + } - } + operation EstimateClassificationProbabilities( + tolerance : Double, + parameters : Double[], + structure : GateSequence, + samples : Double[][], + nMeasurements : Int + 
) + : Double[] { + let effectiveTolerance = tolerance / IntAsDouble(Length(structure!)); + return ForEach( + EstimateClassificationProbability( + effectiveTolerance, parameters, structure, _, nMeasurements + ), + samples + ); + } - /// # Summary - /// Given a of classification probability and a bias, returns the - /// label inferred from that probability. - /// - /// # Input - /// ## bias - /// The bias between two classes, typically the result of training a - /// classifier. - /// ## probability - /// A classification probabilities for a particular sample, typicaly - /// resulting from estimating its classification frequency. - /// - /// # Output - /// The label inferred from the given classification probability. - function InferredLabel(bias : Double, probability : Double) : Int { - return probability + bias > 0.5 ? 1 | 0; - } + /// # Summary + /// Given a classification probability and a bias, returns the + /// label inferred from that probability. + /// + /// # Input + /// ## bias + /// The bias between two classes, typically the result of training a + /// classifier. + /// ## probability + /// A classification probability for a particular sample, typically + /// resulting from estimating its classification frequency. + /// + /// # Output + /// The label inferred from the given classification probability. + function InferredLabel(bias : Double, probability : Double) : Int { + return probability + bias > 0.5 ? 1 | 0; + } - /// # Summary - /// Given an array of classification probabilities and a bias, returns the - /// label inferred from each probability. - /// - /// # Input - /// ## bias - /// The bias between two classes, typically the result of training a - /// classifier. - /// ## probabilities - /// An array of classification probabilities for a set of samples, typicaly - /// resulting from estimating classification frequencies. - /// - /// # Output - /// The label inferred from each classification probability. 
- function InferredLabels(bias : Double, probabilities : Double[]): Int[] { - return Mapped(InferredLabel(bias, _), probabilities); - } - - /// # Summary - /// Estimates all classification probabilities for a given dataset. - /// - /// # Input - /// ## samples - /// a container of labeled samples - /// - /// ## sched - /// a schedule to define a subset of samples - /// - /// ## nQubits - /// number of qubits in the classification circuit - /// - /// ## gates - /// the sequence of gates in the circuit - /// - /// ## param - /// parameters of the circuits - /// - /// ## measCount - /// - /// # Output - /// array of corresponding estimated probabilities of the top class label - /// - operation EstimateClassificationProbabilitiesClassicalData( - tolerance : Double, samples : Double[][], sched : SamplingSchedule, - nQubits : Int, gates : GateSequence, param : Double[], - nMeasurements : Int - ) : Double[] { - let effectiveTolerance = tolerance / IntAsDouble(Length(gates!)); - mutable ret = new Double[0]; - for (rg in sched!) { - for (ix in rg) { - let samp = samples[ix]; - set ret += [EstimateClassificationProbabilityFromEncodedSample( - StateGenerator(nQubits, NoisyInputEncoder(effectiveTolerance, samp)), - param, gates, nMeasurements - )]; - } - } - - return ret; - } - - /// # Summary - /// Using a flat description of a classification model, assign estimated probability of top class label - /// to each vector in the test set - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## gates - /// Flattened representation of classifier structure. 
Each element is - /// [parameterIndex, pauliCode, targetQubit, sequence of control qubits] - /// - /// ## parameters - /// an array of circuit parameters - /// - /// ## samples - /// the set of vectors to be labeled - /// - /// ## bias - /// top class bias - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// Array of predicted class labels for each sample of the test set - /// - operation DoClassification(tolerance: Double, nQubits: Int, gates: Int[][], parameters: Double[], bias: Double, samples : Double[][], nMeasurements: Int) : Int[] { - let schedule = SamplingSchedule([0..Length(samples) - 1]); - let sequence = unFlattenGateSequence(gates); - let probs = EstimateClassificationProbabilitiesClassicalData( - tolerance, samples, schedule, nQubits, sequence, parameters, nMeasurements - ); - return InferredLabels(bias, probs); - } + /// # Summary + /// Given an array of classification probabilities and a bias, returns the + /// label inferred from each probability. + /// + /// # Input + /// ## bias + /// The bias between two classes, typically the result of training a + /// classifier. + /// ## probabilities + /// An array of classification probabilities for a set of samples, typically + /// resulting from estimating classification frequencies. + /// + /// # Output + /// The label inferred from each classification probability. 
+ function InferredLabels(bias : Double, probabilities : Double[]): Int[] { + return Mapped(InferredLabel(bias, _), probabilities); + } } diff --git a/MachineLearning/src/Runtime/Deprecated.qs b/MachineLearning/src/Runtime/Deprecated.qs deleted file mode 100644 index a27d203ba34..00000000000 --- a/MachineLearning/src/Runtime/Deprecated.qs +++ /dev/null @@ -1,84 +0,0 @@ -namespace Microsoft.Quantum.MachineLearning { - open Microsoft.Quantum.Logical; - open Microsoft.Quantum.Arithmetic; - open Microsoft.Quantum.Intrinsic; - open Microsoft.Quantum.Canon; - open Microsoft.Quantum.Math; - - /// Sample container access method - @Deprecated("") - function getSample(samples: LabeledSampleContainer, ix: Int): LabeledSample { - return (samples!)[ix]; - } - - /// Access the raw data in a labeled sample - @Deprecated("") - function getData(samp: LabeledSample): Double[] { - return Fst(samp!); - } - - /// Access the label in a labeled sample - @Deprecated("") - function getLabel(samp:LabeledSample) : Int - { - return Snd(samp!); - } - - - /// Abstraction for a container of labeled samples - @Deprecated("") - newtype LabeledSampleContainer = LabeledSample[]; - - @Deprecated("Microsoft.Quantum.Diagnostics.DumpRegister") - function dumpRegisterToConsole ( qs: Qubit[]) : Unit - {} - //{DumpRegister((),qs);} //Swap for empty body when some dumping of registers is needed - - @Deprecated("Microsoft.Quantum.MachineLearning.NQubitsRequired") - function qubitSpan(seq : GateSequence) : Int { - return NQubitsRequired(seq); - } - - /// Set force a qubit into a desired basis state - @Deprecated("Microsoft.Quantum.Measurement.SetToBasisState") - operation Set (desired: Result, q1: Qubit) : Unit - { - //body - //{ - let current = M(q1); - if (desired != current) - { - X(q1); - } - //} - } - - @Deprecated("Microsoft.Quantum.Math.SquaredNorm") - function squareNorm(v:Double[]):Double - { - mutable ret = 0.0; - for (u in v) - { - set ret = ret + u*u; - } - return ret; - } - - @Deprecated("") // 
replace with ForEach. - operation randomizeArray(src:Double[], relativeFuzz: Double) : Double[] - { - mutable ret = new Double[Length(src)]; - for (ix in 0..(Length(src)-1)) - { - set ret w/=ix<-randomize(src[ix], relativeFuzz); - } - return ret; - } - - @Deprecated("Microsoft.Quantum.Math.NearlyEqualD") - function nearIdenticalDoubles(x:Double,y:Double):Bool { - return NearlyEqualD(x, y); //Note key tolerance constant here - } - - -} diff --git a/MachineLearning/src/Runtime/Examples.qs b/MachineLearning/src/Runtime/Examples.qs index 5c9d863f18d..b7c3ccd4129 100644 --- a/MachineLearning/src/Runtime/Examples.qs +++ b/MachineLearning/src/Runtime/Examples.qs @@ -1,96 +1,94 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Primitive; - open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Convert; open Microsoft.Quantum.Math; - operation IrisTrainingData() : LabeledSampleContainer { - let ret = - [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), - LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], - 1)), LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], - 0)), LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], - 0)), LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], - 0)), LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], - 0)), LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], - 1)), LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], - 0)), LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], - 1)), LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], - 1)), LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], - 0)), LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], - 0)), LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], - 0)), LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], - 0)), LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], - 0)), LabeledSample(([0.584546, 0.562276, 0.439516, 0.385976], - 1)), LabeledSample(([0.608485, 0.577022, 0.427781, 
0.337336], - 1)), LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], - 1)), LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], - 1)), LabeledSample(([0.512997, 0.525043, 0.460839, 0.49879], - 0)), LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], - 0)), LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], - 0)), LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], - 1)), LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], - 1)), LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], - 0)), LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], - 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], - 0)), LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], - 1)), LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], - 0)), LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], - 0)), LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], - 0)), LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], - 1)), LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], - 1)), LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], - 1)), LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], - 1)), LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], - 1)), LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], - 1)), LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], - 0)), LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], - 0)), LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], - 0)), LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], - 0)), LabeledSample(([0.549086, 0.561405, 0.474075, 0.398223], - 1)), LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], - 0)), LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], - 0)), LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], - 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], - 0)), LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], - 1)), LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], - 0)), 
LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], - 0)), LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], - 0)), LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], - 0)), LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], - 0)), LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], - 1)), LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], - 1)), LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], - 1)), LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], - 1)), LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], - 1)), LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], - 1)), LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], - 1)), LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], - 1)), LabeledSample(([0.552402, 0.574207, 0.444699, 0.409123], - 1)), LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], - 1)), LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], - 0)), LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], - 1)), LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], - 1)), LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], - 0)), LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], - 0)), LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], - 0)), LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], - 1)), LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], - 1)), LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], - 1)), LabeledSample(([0.510624, 0.60155, 0.43847, 0.430285], - 1)), LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], - 1)), LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], - 1)), LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], - 0)), LabeledSample(([0.530036, 0.577194, 0.452731, 0.425375], - 1)), LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], - 1)), LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], - 1)), LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], - 0)), LabeledSample(([0.552072, 
0.545641, 0.487013, 0.400388], 1)) - ]; - return LabeledSampleContainer(ret); - } + operation IrisTrainingData() : LabeledSample[] { + return [LabeledSample(([0.581557, 0.562824, 0.447721, 0.380219], 1)), + LabeledSample(([0.570241, 0.544165, 0.503041, 0.354484], + 1)), LabeledSample(([0.510784, 0.475476, 0.453884, 0.554087], + 0)), LabeledSample(([0.492527, 0.473762, 0.471326, 0.557511], + 0)), LabeledSample(([0.543273, 0.501972, 0.518341, 0.429186], + 0)), LabeledSample(([0.520013, 0.485702, 0.440061, 0.547747], + 0)), LabeledSample(([0.585261, 0.545431, 0.462126, 0.382641], + 1)), LabeledSample(([0.541059, 0.479438, 0.568697, 0.392401], + 0)), LabeledSample(([0.555604, 0.517196, 0.474722, 0.445479], + 1)), LabeledSample(([0.592542, 0.537541, 0.468725, 0.374486], + 1)), LabeledSample(([0.552254, 0.51027, 0.511855, 0.415505], + 0)), LabeledSample(([0.530874, 0.465606, 0.503344, 0.498025], + 0)), LabeledSample(([0.568502, 0.492452, 0.524331, 0.399215], + 0)), LabeledSample(([0.511768, 0.53197, 0.46875, 0.485156], + 0)), LabeledSample(([0.555756, 0.420141, 0.553663, 0.456152], + 0)), LabeledSample(([0.584546, 0.562276, 0.439516, 0.385976], + 1)), LabeledSample(([0.608485, 0.577022, 0.427781, 0.337336], + 1)), LabeledSample(([0.546234, 0.59768, 0.46082, 0.36339], + 1)), LabeledSample(([0.596632, 0.510739, 0.482188, 0.388162], + 1)), LabeledSample(([0.512997, 0.525043, 0.460839, 0.49879], + 0)), LabeledSample(([0.477408, 0.488846, 0.465015, 0.562914], + 0)), LabeledSample(([0.553381, 0.457028, 0.546788, 0.431182], + 0)), LabeledSample(([0.543981, 0.555533, 0.491698, 0.392047], + 1)), LabeledSample(([0.532066, 0.497762, 0.5178, 0.448354], + 1)), LabeledSample(([0.505981, 0.460209, 0.506897, 0.524639], + 0)), LabeledSample(([0.44959, 0.489591, 0.490236, 0.563772], + 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], + 0)), LabeledSample(([0.552142, 0.553439, 0.474121, 0.405035], + 1)), LabeledSample(([0.495714, 0.452003, 0.497858, 0.549635], + 0)), 
LabeledSample(([0.523342, 0.480002, 0.484639, 0.510722], + 0)), LabeledSample(([0.493365, 0.473391, 0.504036, 0.527673], + 0)), LabeledSample(([0.552146, 0.542635, 0.505733, 0.380679], + 1)), LabeledSample(([0.578287, 0.517882, 0.46856, 0.421704], + 1)), LabeledSample(([0.588389, 0.569435, 0.47621, 0.320571], + 1)), LabeledSample(([0.572852, 0.583312, 0.441711, 0.369431], + 1)), LabeledSample(([0.540173, 0.571013, 0.440259, 0.43397], + 1)), LabeledSample(([0.588118, 0.554021, 0.452409, 0.377498], + 1)), LabeledSample(([0.499325, 0.454156, 0.500229, 0.542391], + 0)), LabeledSample(([0.541172, 0.446455, 0.491748, 0.515746], + 0)), LabeledSample(([0.501365, 0.513378, 0.488352, 0.496577], + 0)), LabeledSample(([0.519525, 0.498491, 0.475854, 0.505137], + 0)), LabeledSample(([0.549086, 0.561405, 0.474075, 0.398223], + 1)), LabeledSample(([0.504199, 0.486123, 0.476877, 0.53109], + 0)), LabeledSample(([0.530715, 0.466196, 0.504931, 0.496032], + 0)), LabeledSample(([0.515663, 0.527232, 0.474253, 0.480835], + 0)), LabeledSample(([0.498647, 0.482584, 0.502011, 0.516187], + 0)), LabeledSample(([0.591455, 0.54028, 0.471969, 0.368136], + 1)), LabeledSample(([0.459772, 0.46144, 0.462874, 0.601191], + 0)), LabeledSample(([0.527031, 0.492257, 0.472236, 0.506867], + 0)), LabeledSample(([0.534498, 0.534498, 0.495766, 0.427598], + 0)), LabeledSample(([0.561849, 0.441966, 0.530269, 0.455857], + 0)), LabeledSample(([0.483984, 0.503088, 0.458885, 0.549624], + 0)), LabeledSample(([0.525126, 0.566848, 0.450923, 0.446761], + 1)), LabeledSample(([0.576674, 0.501348, 0.480182, 0.430723], + 1)), LabeledSample(([0.58787, 0.558697, 0.451917, 0.371534], + 1)), LabeledSample(([0.584716, 0.552543, 0.446305, 0.391937], + 1)), LabeledSample(([0.604866, 0.502993, 0.484769, 0.382275], + 1)), LabeledSample(([0.576834, 0.538774, 0.469003, 0.39626], + 1)), LabeledSample(([0.588747, 0.563029, 0.444888, 0.372089], + 1)), LabeledSample(([0.575899, 0.560012, 0.4573, 0.38158], + 1)), LabeledSample(([0.552402, 
0.574207, 0.444699, 0.409123], + 1)), LabeledSample(([0.589006, 0.546658, 0.46965, 0.365605], + 1)), LabeledSample(([0.540387, 0.443462, 0.537296, 0.471843], + 0)), LabeledSample(([0.570654, 0.548912, 0.458326, 0.403716], + 1)), LabeledSample(([0.544644, 0.547271, 0.467682, 0.430268], + 1)), LabeledSample(([0.525228, 0.503964, 0.508832, 0.459615], + 0)), LabeledSample(([0.462827, 0.527655, 0.461528, 0.542553], + 0)), LabeledSample(([0.50897, 0.522189, 0.507054, 0.459527], + 0)), LabeledSample(([0.546369, 0.577899, 0.460934, 0.393768], + 1)), LabeledSample(([0.615382, 0.467063, 0.492079, 0.401268], + 1)), LabeledSample(([0.573572, 0.473185, 0.510765, 0.431544], + 1)), LabeledSample(([0.510624, 0.60155, 0.43847, 0.430285], + 1)), LabeledSample(([0.563956, 0.532924, 0.469591, 0.421223], + 1)), LabeledSample(([0.581565, 0.592669, 0.391677, 0.396376], + 1)), LabeledSample(([0.533848, 0.501219, 0.4732, 0.489762], + 0)), LabeledSample(([0.530036, 0.577194, 0.452731, 0.425375], + 1)), LabeledSample(([0.595573, 0.439349, 0.494919, 0.455325], + 1)), LabeledSample(([0.584424, 0.557699, 0.438769, 0.393576], + 1)), LabeledSample(([0.544759, 0.441244, 0.494108, 0.514196], + 0)), LabeledSample(([0.552072, 0.545641, 0.487013, 0.400388], 1)) + ]; + } - operation Examples () : Unit + operation Examples () : Unit { - + } } diff --git a/MachineLearning/src/Runtime/GradientEstimation.qs b/MachineLearning/src/Runtime/GradientEstimation.qs new file mode 100644 index 00000000000..92b340a6e19 --- /dev/null +++ b/MachineLearning/src/Runtime/GradientEstimation.qs @@ -0,0 +1,122 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Arithmetic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Characterization; + + // NOTE: the last qubit of 'reg' in this context is the auxiliary qubit used in the Hadamard test. + operation _endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, reg: Qubit[]): Unit is Adj + Ctl { + let L = Length(reg) - 1; + let g1 = _ApplyGates(param1, gates1, _); + let g2 = _ApplyGates(param2, gates2, _); + + enc(LittleEndian(reg[0..(L-1)])); + within { + H(Tail(reg)); + } apply { + (Controlled g1) ([reg[L]], reg[0..(L-1)]); + within { + X(Tail(reg)); + } apply { + (Controlled g2) ([reg[L]], reg[0..(L-1)]); + (Controlled Z) ([reg[L]], reg[(L-1)]); + } + } + } + + operation endToEndHTcircuit(enc: (LittleEndian => Unit is Adj + Ctl),param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence) : (Qubit[] => Unit is Adj) { + return _endToEndHTcircuit(enc,param1, gates1, param2, gates2, _); + } + + operation HardamardTestPhysical(enc2: (LittleEndian => Unit is Adj + Ctl), param1 : Double[], gates1: GateSequence, param2 : Double[], gates2: GateSequence, nQubits: Int, nMeasurements : Int): Double { + return 1.0 - EstimateFrequencyA( + endToEndHTcircuit(enc2,param1,gates1,param2,gates2), + _TailMeasurement(nQubits), + nQubits, + nMeasurements + ); + } + + + + /// # Summary + /// polymorphic classical/quantum gradient estimator + /// + /// # Input + /// ## param + /// circuit parameters + /// + /// ## gates + /// sequence of gates in the circuits + /// + /// ## sg + /// generates quantum encoding of a subject sample (either simulated or true) + /// + /// ## nMeasurements + /// number of true
quantum measurements to estimate probabilities. + /// IMPORTANT: nMeasurements==0 implies simulator deployment + /// + /// # Output + /// the gradient + /// + operation EstimateGradient( + gates : GateSequence, + param : Double[], + sg : StateGenerator, + nMeasurements : Int + ) + : (Double[]) { + //Synopsis: Suppose (param,gates) define Circ0 + //Suppose (param1,gates1) define Circ1 that implements one-gate derivative of Circ0 + //The expectation derivative is then 2 Re[] = + // Re[] - Re[] + //We observe SEE THEORY that for (Circ1)=(Circ0)' , Re[]==0 + //Thus we are left to compute Re[] = + // 1 - 1/2 < (Z \otimes Id) Circ0 psi - Circ1 psi | (Z \otimes Id) Circ0 psi - Circ1 psi> + //i.e., 1 - HadamardTestResultHack(Circ1,[Z],Circ0) + + + //Now, suppose a gate at which we differentiate is the (Controlled R(\theta))([k0,k1,...,kr],[target]) + //and we want a unitary description of its \theta-derivative. It can be written as + // 1/2 {(Controlled R(\theta'))([k0,k1,...,kr],[target]) - (Controlled Z)([k1,...,kr],[k0])(Controlled R(\theta'))([k0,k1,...,kr],[target])} + mutable grad = ConstantArray(Length(param), 0.0); + let nQubits = MaxI(NQubitsRequired(gates), sg::NQubits); + + for (gate in gates!) { + let paramShift = (param + [0.0]) + // Shift the corresponding parameter.
+ w/ gate::Index <- (param[gate::Index] + PI()); + + // NB: This is the *antiderivative* of the bracket + let newDer = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, paramShift, gates, nQubits + 1, nMeasurements + ) - 1.0; + if (IsEmpty(gate::Span::ControlIndices)) { + //uncontrolled gate + set grad w/= gate::Index <- grad[gate::Index] + newDer; + } else { + //controlled gate + let controlledShift = paramShift + w/ gate::Index <- (param[gate::Index] + 3.0 * PI()); + //Assumption: any rotation R has the property that R(\theta+2 Pi)=(-1).R(\theta) + // NB: This is the *antiderivative* of the bracket + let newDer1 = 2.0 * HardamardTestPhysical( + sg::Apply, param, gates, controlledShift, gates, nQubits + 1, + nMeasurements + ) - 1.0; + set grad w/= gate::Index <- (grad[gate::Index] + 0.5 * (newDer - newDer1)); + } + } + return grad; + + } + +} diff --git a/MachineLearning/src/Runtime/InputEncoding.qs b/MachineLearning/src/Runtime/InputEncoding.qs index 201b1d5e3be..730fa2d98e8 100644 --- a/MachineLearning/src/Runtime/InputEncoding.qs +++ b/MachineLearning/src/Runtime/InputEncoding.qs @@ -2,6 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.MachineLearning { + open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Preparation; open Microsoft.Quantum.Convert; open Microsoft.Quantum.Math; @@ -35,19 +36,31 @@ namespace Microsoft.Quantum.MachineLearning { + function _NegativeLocations(cNegative: Int, coefficients : ComplexPolar[]) : Int[] { + mutable negLocs = new Int[0]; + for ((idx, coefficient) in Enumerated(coefficients)) { + if (AbsD(coefficient::Argument - PI()) < 1E-9) { + set negLocs += [idx]; + } + } + return Length(negLocs) > cNegative ?
negLocs[...cNegative - 1] | negLocs; + } + /// Do special processing on the first cNegative entries - operation _EncodeSparseNegativeInput(cNegative: Int, tolerance: Double,coefficients : ComplexPolar[], reg: LittleEndian): Unit is Adj + Ctl - { - let negLocs = collectNegativeLocs(cNegative, coefficients); + operation _EncodeSparseNegativeInput( + cNegative: Int, + tolerance: Double, + coefficients : ComplexPolar[], + reg: LittleEndian + ) + : Unit is Adj + Ctl { + let negLocs = _NegativeLocations(cNegative, coefficients); // Prepare the state disregarding the sign of negative components. ApproximatelyPrepareArbitraryState(tolerance, _Unnegate(negLocs, coefficients), reg); // Reflect about the negative coefficients to apply the negative signs // at the end. - for (ineg in 0..(cNegative - 1)) { - let jx = negLocs[ineg]; - if (jx > -1) { - ReflectAboutInteger(jx, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) - } + for (idxNegative in negLocs) { + ReflectAboutInteger(idxNegative, reg); //TODO:REVIEW: this assumes that 2^Length(reg) is the minimal pad to Length(coefficients) } } diff --git a/MachineLearning/src/Runtime/Training.qs b/MachineLearning/src/Runtime/Training.qs index a6072d499da..9acea0d7dc6 100644 --- a/MachineLearning/src/Runtime/Training.qs +++ b/MachineLearning/src/Runtime/Training.qs @@ -9,7 +9,7 @@ namespace Microsoft.Quantum.MachineLearning { function _MisclassificationRate(probabilities : Double[], labels : Int[], bias : Double) : Double { let proposedLabels = InferredLabels(bias, probabilities); - return IntAsDouble(NMismatches(proposedLabels, labels)) / IntAsDouble(Length(probabilities)); + return IntAsDouble(NMisclassifications(proposedLabels, labels)) / IntAsDouble(Length(probabilities)); } /// # Summary @@ -47,20 +47,14 @@ namespace Microsoft.Quantum.MachineLearning { } operation TrainSequentialClassifier( - nQubits: Int, gates: GateSequence, parameterSource: Double[][], samples: LabeledSample[], 
+ options : TrainingOptions, trainingSchedule: SamplingSchedule, - validationSchedule: SamplingSchedule, - learningRate: Double, - tolerance: Double, - miniBatchSize: Int, - maxEpochs: Int, - nMeasurements: Int - ) : (Double[], Double) { - mutable retParam = [-1E12]; - mutable retBias = -2.0; //Indicates non-informative start + validationSchedule: SamplingSchedule + ) : SequentialModel { + mutable bestSoFar = SequentialModel([-1E12], -2.0); mutable bestValidation = Length(samples) + 1; let features = Mapped(_Features, samples); @@ -68,94 +62,34 @@ namespace Microsoft.Quantum.MachineLearning { for (idxStart in 0..(Length(parameterSource) - 1)) { Message($"Beginning training at start point #{idxStart}..."); - let ((h, m), (b, parpar)) = StochasticTrainingLoop( - samples, trainingSchedule, trainingSchedule, 1, miniBatchSize, - parameterSource[idxStart], gates, 0.0, learningRate, maxEpochs, - tolerance, nMeasurements + let proposedUpdate = TrainSequentialClassifierAtModel( + gates, SequentialModel(parameterSource[idxStart], 0.0), + samples, options, trainingSchedule, 1 ); - let probsValidation = EstimateClassificationProbabilitiesClassicalData( - tolerance, features, validationSchedule, nQubits, - gates, parpar, nMeasurements + let probabilities = EstimateClassificationProbabilities( + options::Tolerance, + proposedUpdate::Parameters, + gates, + Sampled(validationSchedule, features), + options::NMeasurements ); // Find the best bias for the new classification parameters. 
let localBias = _UpdatedBias( - Zip(probsValidation, Sampled(validationSchedule, labels)), + Zip(probabilities, Sampled(validationSchedule, labels)), 0.0, - tolerance + options::Tolerance ); - let localPL = InferredLabels(localBias, probsValidation); - let localMisses = NMismatches(localPL, Sampled(validationSchedule, labels)); + let localPL = InferredLabels(localBias, probabilities); + let localMisses = NMisclassifications(localPL, Sampled(validationSchedule, labels)); if (bestValidation > localMisses) { set bestValidation = localMisses; - set retParam = parpar; - set retBias = localBias; + set bestSoFar = proposedUpdate; } } - return (retParam, retBias); + return bestSoFar; } - /// # Summary - /// Using a flat description of a classification model, find a good local optimum - /// for the model parameters and a related calssification bias - /// - /// # Input - /// ## nQubits - /// the number of qubits used for data encoding - /// - /// ## gates - /// flat characterization of circuit structure. 
Each element is [parameterIndex, pauliCode, targetQubit\,sequence of control qubits\] - /// - /// ## parameterSource - /// an array of parameter arrays, to be used as SGD starting points - /// - /// ## trainingSet - /// the set of training samples - /// - /// ## trainingLabels - /// the set of training labels - /// - /// ## trainingSchedule - /// defines a subset of training data actually used in the training process - /// - /// ## validatioSchedule - /// defines a subset of training data used for validation and computation of the *bias* - /// - /// ## learningRate - /// initial learning rate for stochastic gradient descent - /// - /// ## tolerance - /// sufficient absolute precision of parameter updates - /// - /// ## learningRate - /// initial learning rate for stochastic gradient descent - /// - /// ## miniBatchSize - /// maximum size of SGD mini batches - /// - /// ## maxEpochs - /// limit to the number of training epochs - /// - /// ## nMeasurenets - /// number of the measurement cycles to be used for estimation of each probability - /// - /// # Output - /// (Array of optimal parameters, optimal validation *bias*) - /// - operation TrainQcccSequential(nQubits: Int, gates: Int[][], parameterSource: Double[][], trainingSet: Double[][], trainingLabels: Int[], trainingSchedule: Int[][], validationSchedule: Int[][], - learningRate: Double, tolerance: Double, miniBatchSize: Int, maxEpochs: Int, nMeasurements: Int) : (Double[],Double) { - let samples = unFlattenLabeledSamples(trainingSet,trainingLabels); - let sch = unFlattenSchedule(trainingSchedule); - let schValidate = unFlattenSchedule(validationSchedule); - let gateSequence = unFlattenGateSequence(gates); - - return TrainSequentialClassifier( - nQubits, gateSequence, parameterSource, samples, - sch, schValidate, learningRate, tolerance, miniBatchSize, - maxEpochs, nMeasurements - ); - } //TrainQcccSequential - /// # Summary /// attempts a single parameter update in the direction of mini batch gradient /// @@ 
-178,31 +112,40 @@ namespace Microsoft.Quantum.MachineLearning { /// # Output /// (utility, (new)parameters) pair /// - operation OneStochasticTrainingStep( - tolerance: Double, miniBatch: LabeledSample[], param: Double[], gates: GateSequence, - lrate: Double, measCount: Int - ) : (Double, Double[]) { - mutable upParam = new Double[Length(param)]; + operation _RunSingleTrainingStep( + miniBatch : LabeledSample[], + options : TrainingOptions, + param : Double[], gates : GateSequence + ) + : (Double, Double[]) { mutable batchGradient = ConstantArray(Length(param), 0.0); + let nQubits = MaxI(FeatureRegisterSize(miniBatch[0]::Features), NQubitsRequired(gates)); + let effectiveTolerance = options::Tolerance / IntAsDouble(Length(gates!)); - for (samp in miniBatch) { - mutable err = IntAsDouble(samp::Label); + for (sample in miniBatch) { + mutable err = IntAsDouble(sample::Label); if (err < 1.0) { set err = -1.0; //class 0 misclassified to class 1; strive to reduce the probability } - let grad = EstimateGradientFromClassicalSample(tolerance, param, gates, samp::Features, measCount); + let stateGenerator = StateGenerator( + nQubits, + NoisyInputEncoder(effectiveTolerance, sample::Features) + ); + let grad = EstimateGradient( + gates, param, stateGenerator, + options::NMeasurements + ); for (ip in 0..(Length(param) - 1)) { // GradientClassicalSample actually computes antigradient, but err*grad corrects it back to gradient - set batchGradient w/= ip <- (batchGradient[ip] + lrate * err * grad[ip]); + set batchGradient w/= ip <- (batchGradient[ip] + options::LearningRate * err * grad[ip]); } } - for (ip in 0..(Length(param)-1)) { - set upParam w/= ip <- (param[ip] + batchGradient[ip]); - } - return (SquaredNorm(batchGradient), upParam); //TODO:REVIEW: Ok to interpret utility as size of the overall move? - } + let updatedParameters = Mapped(PlusD, Zip(param, batchGradient)); + // TODO:REVIEW: Ok to interpret utility as size of the overall move? 
+ return (SquaredNorm(batchGradient), updatedParameters); + } /// # Summary /// Perform one epoch of circuit training on a subset of data samples to a quantum simulator @@ -238,92 +181,76 @@ namespace Microsoft.Quantum.MachineLearning { /// ## measCount /// number of true quantum measurements to estimate probabilities. /// - operation OneStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int, - h0: Int, m0: Int): ((Int,Int),(Double,Double[])) - { - let HARDCODEDunderage = 3; //4/26 slack greater than 3 is not recommended - - - mutable hBest = h0; - mutable mBest = m0; - mutable biasBest = bias; - - let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - let (h2,m2) = TallyHitsMisses(pls,biasBest); - let missLocations = MissLocations(schedScore, pls, biasBest); - - mutable paramBest = param; - mutable paramCurrent = paramBest; - mutable biasCurrent = biasBest; + operation _RunSingleTrainingEpoch( + samples: LabeledSample[], + schedule: SamplingSchedule, periodScore: Int, + options : TrainingOptions, + model : SequentialModel, gates: GateSequence, + nPreviousBestMisses : Int + ) + : (Int, SequentialModel) { + let HARDCODEDunderage = 3; // 4/26 slack greater than 3 is not recommended + + mutable nBestMisses = nPreviousBestMisses; + mutable bestSoFar = model; + let features = Mapped(_Features, samples); + let actualLabels = Mapped(_Label, samples); + + let inferredLabels = InferredLabels( + model::Bias, + EstimateClassificationProbabilities( + options::Tolerance, model::Parameters, gates, + features, options::NMeasurements + ) + ); //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters - for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { - let miniBatch = 
ExtractMiniBatch(miniBatchSize, ixLoc, missLocations, samples); - let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); - if (Microsoft.Quantum.Math.AbsD(utility) > 0.0000001) { - //There had been some parameter update - if (utility > 0.0) { //good parameter update - set paramCurrent = upParam; - let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); - set biasCurrent = _UpdatedBias(plsCurrent, bias, tolerance); - let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); - if (m1 < mBest + HARDCODEDunderage) { - //we allow limited non-greediness - if (m1 < mBest) { - set hBest = h1; - set mBest = m1; - set paramBest = paramCurrent; - set biasBest = biasCurrent; - } - } else { - //otherwise we scrap the parameter update - set paramCurrent = paramBest; - set biasCurrent = biasBest; - } + let minibatches = Mapped( + Subarray(_, samples), + Chunks( + options::MinibatchSize, + Misclassifications(inferredLabels, actualLabels) + ) + ); + for (minibatch in minibatches) { + let (utility, updatedParameters) = _RunSingleTrainingStep( + minibatch, options, bestSoFar::Parameters, gates + ); + if (utility > 0.0000001) { + // There has been some good parameter update. + // Check if it actually improves things, and if so, + // commit it. 
+ let probabilities = EstimateClassificationProbabilities( + options::Tolerance, updatedParameters, gates, + features, options::NMeasurements + ); + let updatedBias = _UpdatedBias( + Zip(probabilities, actualLabels), model::Bias, options::Tolerance + ); + let updatedLabels = InferredLabels( + updatedBias, probabilities + ); + let nMisses = Length(Misclassifications( + updatedLabels, actualLabels + )); + if (nMisses < nBestMisses) { + set nBestMisses = nMisses; + set bestSoFar = SequentialModel(updatedParameters, updatedBias); } } } - return ((hBest, mBest), (biasBest, paramBest)); + return (nBestMisses, bestSoFar); } - //Make some oblivious gradien descent steps without checking the prediction quality - operation OneUncontrolledStochasticTrainingEpoch(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSize: Int, param: Double[], gates: GateSequence, bias: Double, lrate: Double, tolerance: Double, measCount: Int): ((Int,Int),(Double,Double[])) - { - let pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - mutable biasBest = _UpdatedBias(pls, bias, tolerance); - let (h0,m0) = TallyHitsMisses(pls,biasBest); // ClassificationScoreSimulated(samples, schedScore, param, gates, bias); //Deprecated - mutable hCur = h0; - mutable mCur = m0; - let missLocations = MissLocations(schedScore, pls, biasBest); - - mutable paramBest = param; - mutable paramCurrent = paramBest; - mutable biasCurrent = biasBest; - - //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters - for (ixLoc in 0..miniBatchSize..(Length(missLocations) - 1)) { - let miniBatch = ExtractMiniBatch(miniBatchSize,ixLoc,missLocations,samples); - let (utility,upParam) = OneStochasticTrainingStep(tolerance, miniBatch, paramCurrent, gates, lrate, measCount); - if (AbsD(utility) > 0.0000001) { - //There had been some parameter update - if (utility > 0.0) { //good parameter 
update - set paramCurrent = upParam; - let plsCurrent = ClassificationProbabilitiesClassicalData(samples, schedScore, paramCurrent, gates, measCount); - set biasCurrent = _UpdatedBias(plsCurrent, bias, tolerance); - let (h1,m1) = TallyHitsMisses(plsCurrent,biasCurrent); - set hCur = h1; - set mCur = m1; - } - - } - - } - return ((hCur, mCur),(biasCurrent,paramCurrent)); - } //OneUncontrolledStochasticTrainingEpoch + /// # Summary + /// Randomly rescales an input to either grow or shrink by a given factor. + operation _RandomlyRescale(scale : Double, value : Double) : Double { + return value * ( + 1.0 + scale * (Random([0.5, 0.5]) > 0 ? 1.0 | -1.0) + ); + } /// # Summary /// Run a full circuit training loop on a subset of data samples @@ -369,78 +296,100 @@ namespace Microsoft.Quantum.MachineLearning { /// # Output /// ((no.hits,no.misses),(opt.bias,opt.parameters)) /// - operation StochasticTrainingLoop(samples: LabeledSample[], sched: SamplingSchedule, schedScore: SamplingSchedule, periodScore: Int, - miniBatchSizeInital: Int, param: Double[], gates: GateSequence, bias: Double, lrateInitial: Double, maxEpochs: Int, tol: Double, measCount: Int): ((Int,Int),(Double,Double[])) - { - //const - let manyNoops = 4; + operation TrainSequentialClassifierAtModel( + gates : GateSequence, + model : SequentialModel, + samples : LabeledSample[], + options : TrainingOptions, + schedule : SamplingSchedule, + periodScore : Int + ) + : SequentialModel { //const - let relFuzz = 0.01; - let HARDCODEDmaxNoops = 2*manyNoops; - mutable pls = ClassificationProbabilitiesClassicalData(samples, schedScore, param, gates, measCount); - mutable biasBest = _UpdatedBias(pls, bias, tol); - let (h0, m0) = TallyHitsMisses(pls,biasBest); - mutable hBest = h0; - mutable mBest = m0; - mutable paramBest = param; - mutable paramCurrent = param; - mutable biasCurrent = biasBest; + let nSamples = Length(samples); + let features = Mapped(_Features, samples); + let actualLabels = Mapped(_Label, samples); + 
let probabilities = EstimateClassificationProbabilities( + options::Tolerance, model::Parameters, gates, + features, options::NMeasurements + ); + mutable bestSoFar = model + w/ Bias <- _UpdatedBias( + Zip(probabilities, actualLabels), + model::Bias, options::Tolerance + ); + let inferredLabels = InferredLabels( + bestSoFar::Bias, probabilities + ); + mutable nBestMisses = Length( + Misclassifications(inferredLabels, actualLabels) + ); + mutable current = bestSoFar; //reintroducing learning rate heuristics - mutable lrate = lrateInitial; - mutable batchSize = miniBatchSizeInital; - mutable noopCount = 0; - mutable upBias = biasCurrent; - mutable upParam = paramCurrent; - for (ep in 1..maxEpochs) { - let ((h1,m1),(upB,upP)) = OneStochasticTrainingEpoch(samples, sched, schedScore, periodScore, - batchSize, paramCurrent, gates, biasCurrent, lrate, tol, measCount, hBest, mBest); - set upBias = upB; - set upParam = upP; - if (m1 < mBest) - { - set hBest = h1; - set mBest = m1; - set paramBest = upParam; - set biasBest = upBias; - if (IntAsDouble (mBest)/IntAsDouble (mBest+hBest)< tol) //Terminate based on tolerance - { - return ((hBest,mBest),(biasBest,paramBest)); + mutable lrate = options::LearningRate; + mutable batchSize = options::MinibatchSize; + + // Keep track of how many times a bias update has stalled out. 
+ mutable nStalls = 0; + + for (ep in 1..options::MaxEpochs) { + let (nMisses, proposedUpdate) = _RunSingleTrainingEpoch( + samples, schedule, periodScore, + options + w/ LearningRate <- lrate + w/ MinibatchSize <- batchSize, + current, gates, + nBestMisses + ); + if (nMisses < nBestMisses) { + set nBestMisses = nMisses; + set bestSoFar = proposedUpdate; + if (IntAsDouble(nMisses) / IntAsDouble(nSamples) < options::Tolerance) { //Terminate based on tolerance + return bestSoFar; } - set noopCount = 0; //Reset the counter of consequtive noops - set lrate = lrateInitial; - set batchSize = miniBatchSizeInital; + set nStalls = 0; //Reset the counter of consequtive noops + set lrate = options::LearningRate; + set batchSize = options::MinibatchSize; } - if (NearlyEqualD(biasCurrent,upBias) and _AllNearlyEqualD(paramCurrent,upParam)) - { - set noopCount = noopCount+1; - if (noopCount > manyNoops) - { - if (noopCount > HARDCODEDmaxNoops) - { - return ((hBest,mBest),(biasBest,paramBest)); //Too many non-steps. Continuation makes no sense - } - else - { - set upBias = randomize(upBias, relFuzz); - set upParam = ForEach(randomize(_, relFuzz), upParam); - } + + if ( + NearlyEqualD(current::Bias, proposedUpdate::Bias) and _AllNearlyEqualD(current::Parameters, proposedUpdate::Parameters) + ) { + set nStalls += 1; + // If we're more than halfway through our maximum allowed number of stalls, + // exit early with the best we actually found. + if (nStalls > options::MaxStalls) { + return bestSoFar; //Too many non-steps. Continuation makes no sense } - set batchSize = noopCount; //batchSize + 1; //Try to fuzz things up with smaller batch count + + // Otherwise, heat up the learning rate and batch size. 
+ set batchSize = nStalls; //batchSize + 1; //Try to fuzz things up with smaller batch count //and heat up a bit - set lrate = 1.25*lrate; - } - else - { - set noopCount = 0; //Reset the counter of consequtive noops - set lrate = lrateInitial; - set batchSize = miniBatchSizeInital; + set lrate *= 1.25; + + // If we stalled out, we'll also randomly rescale our parameters + // and bias before updating. + if (nStalls > options::MaxStalls / 2) { + set current = SequentialModel( + ForEach(_RandomlyRescale(options::StochasticRescaleFactor, _), proposedUpdate::Parameters), + _RandomlyRescale(options::StochasticRescaleFactor, proposedUpdate::Bias) + ); + } + } else { + // If we learned successfully this iteration, reset the number of + // stalls so far. + set nStalls = 0; //Reset the counter of consequtive noops + set lrate = options::LearningRate; + set batchSize = options::MinibatchSize; + + // Since we didn't stall out, we can set the parameters and bias + // as normal, without randomizing. + set current = proposedUpdate; } - set paramCurrent = upParam; - set biasCurrent = upBias; } - return ((hBest,mBest),(biasBest,paramBest)); + return bestSoFar; } } diff --git a/MachineLearning/src/Runtime/Types.qs b/MachineLearning/src/Runtime/Types.qs index 759acbc4094..3d8ca3ab9a5 100644 --- a/MachineLearning/src/Runtime/Types.qs +++ b/MachineLearning/src/Runtime/Types.qs @@ -89,6 +89,25 @@ namespace Microsoft.Quantum.MachineLearning { NMisclassifications: Int ); + newtype TrainingOptions = ( + LearningRate: Double, + Tolerance: Double, + MinibatchSize: Int, + NMeasurements: Int, + MaxEpochs: Int, + MaxStalls: Int, + StochasticRescaleFactor: Double + ); + + function DefaultTrainingOptions() : TrainingOptions { + return TrainingOptions( + 0.1, 0.005, 15, 10000, 16, 8, 0.01 + ); + } + newtype SequentialModel = ( + Parameters: Double[], + Bias: Double + ); } diff --git a/MachineLearning/src/Runtime/Utils.qs b/MachineLearning/src/Runtime/Utils.qs index 6a97180404d..c412f05461d 100644 
--- a/MachineLearning/src/Runtime/Utils.qs +++ b/MachineLearning/src/Runtime/Utils.qs @@ -9,4 +9,9 @@ namespace Microsoft.Quantum.MachineLearning { return Length(v1) == Length(v2) and All(NearlyEqualD, Zip(v1, v2)); } + operation _TailMeasurement(nQubits : Int) : (Qubit[] => Result) { + let paulis = ConstantArray(nQubits, PauliI) w/ (nQubits - 1) <- PauliZ; + return Measure(paulis, _); + } + } diff --git a/MachineLearning/src/Runtime/Validation.qs b/MachineLearning/src/Runtime/Validation.qs index fca2e87397a..f146eb4427a 100644 --- a/MachineLearning/src/Runtime/Validation.qs +++ b/MachineLearning/src/Runtime/Validation.qs @@ -1,42 +1,19 @@ namespace Microsoft.Quantum.MachineLearning { open Microsoft.Quantum.Arrays; open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Logical; open Microsoft.Quantum.Canon; - function NMismatches(proposed: Int[], actual: Int[]): Int { - mutable count = 0; - for ((proposedLabel, actualLabel) in Zip(proposed, actual)) { - if (proposedLabel != actualLabel) { - set count += 1; - } - } - return count; + function Misclassifications(inferredLabels : Int[], actualLabels : Int[]) + : Int[] { + return Where( + NotEqualI, + Zip(inferredLabels, actualLabels) + ); } - /// # Summary - /// tallies hits and misses off a list of probability estimates - /// - /// # Input - /// ## pls - /// a list of estimated probabilities with the corresponding class labels - /// - /// ## bias - /// bias on record - /// - /// # Output - /// (no.hits, no.misses) pair - /// - function TallyHitsMisses(pls : (Double, Int)[], bias : Double) : (Int, Int) { - mutable hits = 0; - mutable misses = 0; - for ((classificationProbability, label) in pls) { - if (label == InferredLabel(bias, classificationProbability)) { - set hits += 1; - } else { - set misses += 1; - } - } - return (hits, misses); + function NMisclassifications(proposed: Int[], actual: Int[]): Int { + return Length(Misclassifications(proposed, actual)); } /// # Summary @@ -76,22 +53,34 @@ namespace 
Microsoft.Quantum.MachineLearning { { let schValidate = unFlattenSchedule(validationSchedule); let results = ValidateModel( - tolerance, nQubits, Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), - schValidate, unFlattenGateSequence(gates), - parameters, bias, nMeasurements + unFlattenGateSequence(gates), + SequentialModel(parameters, bias), + Mapped(LabeledSample, Zip(trainingSet, trainingLabels)), + tolerance, nMeasurements, + schValidate ); return results::NMisclassifications; } - operation ValidateModel(tolerance: Double, nQubits: Int, samples : LabeledSample[], validationSchedule: SamplingSchedule, gates: GateSequence, parameters: Double[], bias:Double, nMeasurements: Int) : ValidationResults - { + operation ValidateModel( + gates: GateSequence, + model : SequentialModel, + samples : LabeledSample[], + tolerance: Double, + nMeasurements: Int, + validationSchedule: SamplingSchedule + ) + : ValidationResults { let features = Mapped(_Features, samples); let labels = Sampled(validationSchedule, Mapped(_Label, samples)); - let probsValidation = EstimateClassificationProbabilitiesClassicalData(tolerance, features, validationSchedule, nQubits, gates, parameters, nMeasurements); - let localPL = InferredLabels(bias, probsValidation); - let nMismatches = NMismatches(localPL, labels); + let probabilities = EstimateClassificationProbabilities( + tolerance, model::Parameters, gates, + Sampled(validationSchedule, features), nMeasurements + ); + let localPL = InferredLabels(model::Bias, probabilities); + let nMisclassifications = NMisclassifications(localPL, labels); return ValidationResults( - nMismatches + nMisclassifications ); } diff --git a/Standard/src/Arrays/Arrays.qs b/Standard/src/Arrays/Arrays.qs index 13a111729e5..bb45ed9bcc3 100644 --- a/Standard/src/Arrays/Arrays.qs +++ b/Standard/src/Arrays/Arrays.qs @@ -249,6 +249,33 @@ namespace Microsoft.Quantum.Arrays { | inputArray + padArray; // Padded at tail. 
} + + /// # Summary + /// Splits an array into multiple parts of equal length. + /// + /// # Input + /// ## nElements + /// The length of each chunk. + /// ## arr + /// The array to be split. + /// + /// # Output + /// A array containing each chunk of the original array. + /// + /// # Remarks + /// Note that the last element of the output may be shorter + /// than `nElements` if `Length(arr)` is not divisible by `nElements`. + function Chunks<'T>(nElements : Int, arr : 'T[]) : 'T[][] { + mutable output = new 'T[][0]; + mutable remaining = arr; + while (not IsEmpty(remaining)) { + let nElementsToTake = MinI(Length(remaining), nElements); + set output += [remaining[...nElementsToTake - 1]]; + set remaining = remaining[nElementsToTake...]; + } + return output; + } + /// # Summary /// Splits an array into multiple parts. /// diff --git a/Standard/src/Arrays/Filter.qs b/Standard/src/Arrays/Filter.qs index baa86f3c742..cadf8d58dae 100644 --- a/Standard/src/Arrays/Filter.qs +++ b/Standard/src/Arrays/Filter.qs @@ -2,6 +2,7 @@ // Licensed under the MIT License. namespace Microsoft.Quantum.Arrays { + open Microsoft.Quantum.Canon; /// # Summary /// Given an array and a predicate that is defined @@ -38,4 +39,30 @@ namespace Microsoft.Quantum.Arrays { return Subarray(idxArray[0 .. totalFound - 1], array); } + /// # Summary + /// Given a predicate and an array, returns the indices of that + /// array where the predicate is true. + /// + /// # Type Parameters + /// ## 'T + /// The type of `array` elements. + /// + /// # Input + /// ## predicate + /// A function from `'T` to Boolean that is used to filter elements. + /// ## array + /// An array of elements over `'T`. + /// + /// # Output + /// An array of indices where `predicate` is true. 
+ function Where<'T>(predicate : ('T -> Bool), array : 'T[]) : Int[] { + return Mapped( + Fst, + Filtered( + Snd, + Enumerated(Mapped(predicate, array)) + ) + ); + } + } diff --git a/Standard/tests/ArrayTests.qs b/Standard/tests/ArrayTests.qs index 256b42468dd..556c3c51fe6 100644 --- a/Standard/tests/ArrayTests.qs +++ b/Standard/tests/ArrayTests.qs @@ -1,13 +1,14 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. namespace Microsoft.Quantum.Tests { + open Microsoft.Quantum.Logical; open Microsoft.Quantum.Diagnostics; open Microsoft.Quantum.Canon; open Microsoft.Quantum.Intrinsic; open Microsoft.Quantum.Arrays; - - function ZipTest () : Unit { + @Test("QuantumSimulator") + function ZipTest() : Unit { let left = [1, 2, 101]; let right = [PauliY, PauliI]; @@ -26,6 +27,7 @@ namespace Microsoft.Quantum.Tests { } + @Test("QuantumSimulator") function LookupTest () : Unit { let array = [1, 12, 71, 103]; @@ -38,8 +40,28 @@ namespace Microsoft.Quantum.Tests { EqualityFactI(fn(1), 12, $"fn(1) did not return array[1]"); } + function _AllEqualI(expected : Int[], actual : Int[]) : Bool { + return All(EqualI, Zip(expected, actual)); + } + + @Test("QuantumSimulator") + function ChunksTest() : Unit { + let data = [10, 11, 12, 13, 14, 15]; + + // 2 × 3 case. + Fact(All(_AllEqualI, Zip( + [[10, 11], [12, 13], [14, 15]], + Chunks(2, data) + )), "Wrong chunks in 2x3 case."); + + // Case with some leftovers. + Fact(All(_AllEqualI, Zip( + [[10, 11, 12, 13], [14, 15]], + Chunks(4, data) + )), "Wrong chunks in case with leftover elements."); + } - function ConstantArrayTestHelper (x : Int) : Int { + function _Squared(x : Int) : Int { return x * x; } @@ -52,7 +74,7 @@ namespace Microsoft.Quantum.Tests { let ignore = Mapped(NearEqualityFactD(_, 2.17), dblArray); // Stress test by making an array of Int -> Int. 
- let fnArray = ConstantArray(7, ConstantArrayTestHelper); + let fnArray = ConstantArray(7, _Squared); EqualityFactI(Length(fnArray), 7, $"ConstantArray(Int, Int -> Int) had the wrong length."); EqualityFactI(fnArray[3](7), 49, $"ConstantArray(Int, Int -> Int) had the wrong value."); }