diff --git a/Chemistry/src/Runtime/Runtime.csproj b/Chemistry/src/Runtime/Runtime.csproj
index 329b463b03b..00894345ae6 100644
--- a/Chemistry/src/Runtime/Runtime.csproj
+++ b/Chemistry/src/Runtime/Runtime.csproj
@@ -1,4 +1,4 @@
-
+
 netstandard2.1
@@ -16,7 +16,7 @@
-
+
diff --git a/Chemistry/tests/ChemistryTests/QSharpTests.csproj b/Chemistry/tests/ChemistryTests/QSharpTests.csproj
index 9196080d248..dd14fae3e1c 100644
--- a/Chemistry/tests/ChemistryTests/QSharpTests.csproj
+++ b/Chemistry/tests/ChemistryTests/QSharpTests.csproj
@@ -1,4 +1,4 @@
-
+
diff --git a/Chemistry/tests/SystemTests/SystemTests.csproj b/Chemistry/tests/SystemTests/SystemTests.csproj
index 83477b28bb7..4173aa7eca5 100644
--- a/Chemistry/tests/SystemTests/SystemTests.csproj
+++ b/Chemistry/tests/SystemTests/SystemTests.csproj
@@ -1,4 +1,4 @@
-
+
diff --git a/MachineLearning/src/InputEncoding.qs b/MachineLearning/src/InputEncoding.qs
index 62adda62358..d0dca7dcc05 100644
--- a/MachineLearning/src/InputEncoding.qs
+++ b/MachineLearning/src/InputEncoding.qs
@@ -47,18 +47,12 @@ namespace Microsoft.Quantum.MachineLearning {
     }

     /// Do special processing on the first cNegative entries
-    operation _EncodeSparseNegativeInput(
-        cNegative: Int,
-        tolerance: Double,
+    operation _ReflectAboutNegativeCoefficients(
+        negLocs : Int[],
         coefficients : ComplexPolar[],
         reg: LittleEndian
     ) : Unit is Adj + Ctl {
-        let negLocs = _NegativeLocations(cNegative, coefficients);
-        // Prepare the state disregarding the sign of negative components.
-        ApproximatelyPrepareArbitraryState(tolerance, _Unnegate(negLocs, coefficients), reg);
-        // Reflect about the negative coefficients to apply the negative signs
-        // at the end.
         for (idxNegative in negLocs) {
             ReflectAboutInteger(idxNegative, reg);
         }
@@ -126,16 +120,25 @@ namespace Microsoft.Quantum.MachineLearning {
         // Here, by a "few," we mean fewer than the number of qubits required
         // to encode features.
         if ((cNegative > 0) and (IntAsDouble(cNegative) < Lg(IntAsDouble(Length(coefficients))) + 1.0)) {
+            let negLocs = _NegativeLocations(cNegative, complexCoefficients);
             return StateGenerator(
                 nQubits,
-                _EncodeSparseNegativeInput(cNegative, tolerance, complexCoefficients, _)
+                BoundCA([
+                    // Prepare the state disregarding the sign of negative components.
+                    _CompileApproximateArbitraryStatePreparation(
+                        tolerance, _Unnegate(negLocs, complexCoefficients), nQubits
+                    ),
+                    // Reflect about the negative coefficients to apply the negative signs
+                    // at the end.
+                    _ReflectAboutNegativeCoefficients(negLocs, complexCoefficients, _)
+                ])
             );
         }

         // Finally, we fall back to arbitrary state preparation.
         return StateGenerator(
             nQubits,
-            ApproximatelyPrepareArbitraryState(tolerance, complexCoefficients, _)
+            _CompileApproximateArbitraryStatePreparation(tolerance, complexCoefficients, nQubits)
         );
     }
diff --git a/MachineLearning/src/MachineLearning.csproj b/MachineLearning/src/MachineLearning.csproj
index 881c3d8b5e2..b34d067af4d 100644
--- a/MachineLearning/src/MachineLearning.csproj
+++ b/MachineLearning/src/MachineLearning.csproj
@@ -1,4 +1,4 @@
-
+
 netstandard2.1
 Microsoft.Quantum.MachineLearning
diff --git a/MachineLearning/src/Training.qs b/MachineLearning/src/Training.qs
index a7cc57c9723..c1459ddee3c 100644
--- a/MachineLearning/src/Training.qs
+++ b/MachineLearning/src/Training.qs
@@ -93,7 +93,7 @@ namespace Microsoft.Quantum.MachineLearning {
         let labels = Mapped(_Label, samples);

         for ((idxModel, model) in Enumerated(models)) {
-            Message($"Beginning training at start point #{idxModel}...");
+            options::VerboseMessage($" Beginning training at start point #{idxModel}...");
             let proposedUpdate = TrainSequentialClassifierAtModel(
                 model, samples, options, trainingSchedule, 1
@@ -144,25 +144,20 @@
     /// (utility, (new)parameters) pair
     ///
     operation _RunSingleTrainingStep(
-        miniBatch : LabeledSample[],
+        miniBatch : (LabeledSample, StateGenerator)[],
         options : TrainingOptions,
         model : SequentialModel
     ) : (Double, SequentialModel) {
         mutable batchGradient = ConstantArray(Length(model::Parameters), 0.0);
-        let nQubits = MaxI(FeatureRegisterSize(miniBatch[0]::Features), NQubitsRequired(model));
-        let effectiveTolerance = options::Tolerance / IntAsDouble(Length(model::Structure));

-        for (sample in miniBatch) {
+        for ((idxSample, (sample, stateGenerator)) in Enumerated(miniBatch)) {
             mutable err = IntAsDouble(sample::Label);
             if (err < 1.0) {
                 // Class 0 misclassified to class 1; strive to reduce the probability.
                 set err = -1.0;
             }
-            let stateGenerator = ApproximateInputEncoder(effectiveTolerance, sample::Features)
-                // Force the number of qubits in case something else in the
-                // minibatch requires a larger register.
-                w/ NQubits <- nQubits;
+            options::VerboseMessage($" Estimating gradient at sample {idxSample}...");
             let grad = EstimateGradient(
                 model, stateGenerator, options::NMeasurements
@@ -212,7 +207,7 @@
     /// epoch.
     /// - The new best sequential model found.
     operation _RunSingleTrainingEpoch(
-        samples : LabeledSample[],
+        encodedSamples : (LabeledSample, StateGenerator)[],
         schedule : SamplingSchedule,
         periodScore: Int,
         options : TrainingOptions,
         model : SequentialModel,
@@ -221,6 +216,8 @@
     : (Int, SequentialModel) {
         mutable nBestMisses = nPreviousBestMisses;
         mutable bestSoFar = model;
+        let samples = Mapped(Fst, encodedSamples);
+        let stateGenerators = Mapped(Snd, encodedSamples);
         let features = Mapped(_Features, samples);
         let actualLabels = Mapped(_Label, samples);
@@ -232,19 +229,21 @@
             )
         );

-        //An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters
+        // An epoch is just an attempt to update the parameters by learning from misses based on LKG parameters
         let minibatches = Mapped(
-            Subarray(_, samples),
+            Subarray(_, encodedSamples),
             Chunks(
                 options::MinibatchSize,
                 Misclassifications(inferredLabels, actualLabels)
             )
         );
-        for (minibatch in minibatches) {
+        for ((idxMinibatch, minibatch) in Enumerated(minibatches)) {
+            options::VerboseMessage($" Beginning minibatch {idxMinibatch} of {Length(minibatches)}.");
             let (utility, updatedModel) = _RunSingleTrainingStep(
                 minibatch, options, bestSoFar
             );
             if (utility > 1e-7) {
+                options::VerboseMessage($" Observed good parameter update... estimating and possibly committing.");
                 // There has been some good parameter update.
                 // Check if it actually improves things, and if so,
                 // commit it.
@@ -280,7 +279,16 @@
         );
     }
-
+    function _EncodeSample(effectiveTolerance : Double, nQubits : Int, sample : LabeledSample)
+    : (LabeledSample, StateGenerator) {
+        return (
+            sample,
+            ApproximateInputEncoder(effectiveTolerance, sample::Features)
+                // Force the number of qubits in case something else in the
+                // minibatch requires a larger register.
+                w/ NQubits <- nQubits
+        );
+    }

     /// # Summary
     /// Given the structure of a sequential classifier, trains the classifier
@@ -338,6 +346,12 @@
         );
         mutable current = bestSoFar;
+        // Encode samples first.
+        options::VerboseMessage(" Pre-encoding samples...");
+        let effectiveTolerance = options::Tolerance / IntAsDouble(Length(model::Structure));
+        let nQubits = MaxI(FeatureRegisterSize(samples[0]::Features), NQubitsRequired(model));
+        let encodedSamples = Mapped(_EncodeSample(effectiveTolerance, nQubits, _), samples);
+
         //reintroducing learning rate heuristics
         mutable lrate = options::LearningRate;
         mutable batchSize = options::MinibatchSize;
@@ -346,11 +360,11 @@
         mutable nStalls = 0;

         for (ep in 1..options::MaxEpochs) {
+            options::VerboseMessage($" Beginning epoch {ep}.");
             let (nMisses, proposedUpdate) = _RunSingleTrainingEpoch(
-                samples, schedule, periodScore,
-                options
-                    w/ LearningRate <- lrate
-                    w/ MinibatchSize <- batchSize,
+                encodedSamples, schedule, periodScore,
+                options w/ LearningRate <- lrate
+                        w/ MinibatchSize <- batchSize,
                 current, nBestMisses
             );
@@ -366,7 +380,8 @@
             }

             if (
-                NearlyEqualD(current::Bias, proposedUpdate::Bias) and _AllNearlyEqualD(current::Parameters, proposedUpdate::Parameters)
+                NearlyEqualD(current::Bias, proposedUpdate::Bias) and
+                _AllNearlyEqualD(current::Parameters, proposedUpdate::Parameters)
             ) {
                 set nStalls += 1;
                 // If we're more than halfway through our maximum allowed number of stalls,
diff --git a/MachineLearning/src/Types.qs b/MachineLearning/src/Types.qs
index e62e70a152a..25b15e6de69 100644
--- a/MachineLearning/src/Types.qs
+++ b/MachineLearning/src/Types.qs
@@ -2,6 +2,7 @@
 // Licensed under the MIT License.

 namespace Microsoft.Quantum.MachineLearning {
+    open Microsoft.Quantum.Intrinsic;
     open Microsoft.Quantum.Canon;
     open Microsoft.Quantum.Arithmetic;
@@ -181,6 +182,8 @@
     /// (approximately zero gradient) before failing.
     /// ## StochasticRescaleFactor
     /// The amount to rescale stalled models by before retrying an update.
+    /// ## VerboseMessage
+    /// A function that can be used to provide verbose feedback.
     ///
     /// # Remarks
     /// This UDT should not be created directly, but rather should be specified
@@ -204,7 +207,8 @@
         NMeasurements: Int,
         MaxEpochs: Int,
         MaxStalls: Int,
-        StochasticRescaleFactor: Double
+        StochasticRescaleFactor: Double,
+        VerboseMessage: (String -> Unit)
     );

     /// # Summary
@@ -223,7 +227,8 @@
     /// ```
     function DefaultTrainingOptions() : TrainingOptions {
         return TrainingOptions(
-            0.1, 0.005, 15, 10000, 16, 8, 0.01
+            0.1, 0.005, 15, 10000, 16, 8, 0.01,
+            Ignore
         );
     }
diff --git a/MachineLearning/tests/MachineLearningTests.csproj b/MachineLearning/tests/MachineLearningTests.csproj
index 5762a1a6054..19c6d85219c 100644
--- a/MachineLearning/tests/MachineLearningTests.csproj
+++ b/MachineLearning/tests/MachineLearningTests.csproj
@@ -1,4 +1,4 @@
-
+
diff --git a/Numerics/src/Numerics.csproj b/Numerics/src/Numerics.csproj
index ab4026b5ef1..20509f6c218 100644
--- a/Numerics/src/Numerics.csproj
+++ b/Numerics/src/Numerics.csproj
@@ -1,4 +1,4 @@
-
+
 netstandard2.1
@@ -37,7 +37,7 @@
-
+
diff --git a/Numerics/tests/NumericsTests.csproj b/Numerics/tests/NumericsTests.csproj
index a6ae4fb2aba..ae3f330d512 100644
--- a/Numerics/tests/NumericsTests.csproj
+++ b/Numerics/tests/NumericsTests.csproj
@@ -1,4 +1,4 @@
-
+
diff --git a/Standard/src/Preparation/Arbitrary.qs b/Standard/src/Preparation/Arbitrary.qs
index 7115ca75c48..90fbd0b4a2b 100644
--- a/Standard/src/Preparation/Arbitrary.qs
+++ b/Standard/src/Preparation/Arbitrary.qs
@@ -218,16 +218,57 @@
         qubits : LittleEndian
     ) : Unit is Adj + Ctl {
+        (_CompileApproximateArbitraryStatePreparation(tolerance, coefficients, Length(qubits!)))(qubits);
+    }
+
+    operation _ApplyToLittleEndian(bareOp : ((Qubit[]) => Unit is Adj + Ctl), register : LittleEndian)
+    : Unit is Adj + Ctl {
+        bareOp(register!);
+    }
+
+    function _CompileApproximateArbitraryStatePreparation(
+        tolerance : Double,
+        coefficients : ComplexPolar[],
+        nQubits : Int
+    )
+    : (LittleEndian => Unit is Adj + Ctl) {
         // pad coefficients at tail length to a power of 2.
-        let coefficientsPadded = Padded(-2 ^ Length(qubits!), ComplexPolar(0.0, 0.0), coefficients);
-        let target = (qubits!)[0];
-        let op = (Adjoint _ApproximatelyPrepareArbitraryState(tolerance, coefficientsPadded, _, _))(_, target);
-        op(
+        let coefficientsPadded = Padded(-2 ^ nQubits, ComplexPolar(0.0, 0.0), coefficients);
+        let idxTarget = 0;
+        let rngControl =
             // Determine what controls to apply to `op`.
-            Length(qubits!) > 1
-            ? LittleEndian((qubits!)[1 .. Length(qubits!) - 1])
-            | LittleEndian(new Qubit[0])
+            nQubits > 1
+            ? (1 .. (nQubits - 1))
+            | (1..0);
+        let plan = _ApproximatelyUnprepareArbitraryStatePlan(
+            tolerance, coefficientsPadded, (rngControl, idxTarget)
         );
+        let unprepare = BoundCA(plan);
+        return _ApplyToLittleEndian(Adjoint unprepare, _);
+    }
+
+    operation _ApplyMultiplexStep(
+        tolerance : Double, disentangling : Double[], axis : Pauli,
+        (rngControl : Range, idxTarget : Int),
+        register : Qubit[]
+    )
+    : Unit is Adj + Ctl {
+        let actualControl = LittleEndian(register[rngControl]);
+        ApproximatelyMultiplexPauli(tolerance, disentangling, axis, actualControl, register[idxTarget]);
+    }
+
+    function _RangeLength(rng : Range) : Int {
+        mutable len = 0;
+        for (idx in rng) {
+            set len += 1;
+        }
+        return len;
+    }
+
+    operation _ApplyGlobalRotationStep(
+        angle : Double, idxTarget : Int, register : Qubit[]
+    ) : Unit is Adj + Ctl {
+        Exp([PauliI], angle, [register[idxTarget]]);
     }

     /// # Summary
@@ -236,34 +277,39 @@
     /// # See Also
     /// - PrepareArbitraryState
     /// - Microsoft.Quantum.Canon.MultiplexPauli
-    operation _ApproximatelyPrepareArbitraryState(
+    function _ApproximatelyUnprepareArbitraryStatePlan(
         tolerance : Double, coefficients : ComplexPolar[],
-        control : LittleEndian, target : Qubit
+        (rngControl : Range, idxTarget : Int)
     )
-    : Unit is Adj + Ctl {
+    : (Qubit[] => Unit is Adj + Ctl)[] {
+        mutable plan = new (Qubit[] => Unit is Adj + Ctl)[0];
+
         // For each 2D block, compute disentangling single-qubit rotation parameters
         let (disentanglingY, disentanglingZ, newCoefficients) = _StatePreparationSBMComputeCoefficients(coefficients);
         if (_AnyOutsideToleranceD(tolerance, disentanglingZ)) {
-            ApproximatelyMultiplexPauli(tolerance, disentanglingZ, PauliZ, control, target);
+            set plan += [_ApplyMultiplexStep(tolerance, disentanglingZ, PauliZ, (rngControl, idxTarget), _)];
        }
        if (_AnyOutsideToleranceD(tolerance, disentanglingY)) {
-            ApproximatelyMultiplexPauli(tolerance, disentanglingY, PauliY, control, target);
+            set plan += [_ApplyMultiplexStep(tolerance, disentanglingY, PauliY, (rngControl, idxTarget), _)];
        }
+        // target is now in |0> state up to the phase given by arg of newCoefficients.
        // Continue recursion while there are control qubits.
-        if (Length(control!) == 0) {
+        if (_RangeLength(rngControl) == 0) {
            let (abs, arg) = newCoefficients[0]!;
            if (AbsD(arg) > tolerance) {
-                Exp([PauliI], -1.0 * arg, [target]);
+                set plan += [_ApplyGlobalRotationStep(-1.0 * arg, idxTarget, _)];
            }
        } else {
            if (_AnyOutsideToleranceCP(tolerance, newCoefficients)) {
-                let newControl = LittleEndian(Rest(control!));
-                let newTarget = (control!)[0];
-                _ApproximatelyPrepareArbitraryState(tolerance, newCoefficients, newControl, newTarget);
+                let newControl = (RangeStart(rngControl) + 1)..RangeStep(rngControl)..RangeEnd(rngControl);
+                let newTarget = RangeStart(rngControl);
+                set plan += _ApproximatelyUnprepareArbitraryStatePlan(tolerance, newCoefficients, (newControl, newTarget));
            }
        }
+
+        return plan;
    }
diff --git a/Standard/src/Standard.csproj b/Standard/src/Standard.csproj
index fb04b33a3e9..c4255e9d92d 100644
--- a/Standard/src/Standard.csproj
+++ b/Standard/src/Standard.csproj
@@ -1,4 +1,4 @@
-
+
 netstandard2.1
@@ -30,7 +30,7 @@
-
+
diff --git a/Standard/tests/Standard.Tests.csproj b/Standard/tests/Standard.Tests.csproj
index 28689687eb9..359cd229d09 100644
--- a/Standard/tests/Standard.Tests.csproj
+++ b/Standard/tests/Standard.Tests.csproj
@@ -1,4 +1,4 @@
-
+
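
The TrainingOptions change above adds a VerboseMessage item of type (String -> Unit) and defaults it to Ignore, so the new options::VerboseMessage(...) calls are silent unless a caller opts in. Below is a minimal sketch of such a caller, not part of this diff: the namespace, the function name, and the specific option values are hypothetical; only DefaultTrainingOptions, the w/ copy-and-update pattern, and the Message function from Microsoft.Quantum.Intrinsic come from the library itself.

    namespace App.Examples {
        open Microsoft.Quantum.Intrinsic;        // Message : String -> Unit
        open Microsoft.Quantum.MachineLearning;  // TrainingOptions, DefaultTrainingOptions

        // Builds training options that route progress reports through Message
        // instead of discarding them via the default Ignore.
        function VerboseTrainingOptions() : TrainingOptions {
            return DefaultTrainingOptions()
                w/ LearningRate <- 0.1
                w/ MinibatchSize <- 15
                w/ VerboseMessage <- Message;
        }
    }

Because DefaultTrainingOptions now supplies Ignore for VerboseMessage, existing callers that never set the new item keep their previous, quiet behavior.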